prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
import argparse
import logging
import os
# Keras
from keras.callbacks import EarlyStopping
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import l1, l2
# Numpy
import numpy as np
#Pandas
import pandas as pd
# sklearn and scipy
from sklearn.model_selection import train_test_split
from scipy.stats import pearsonr as r
from GA.utils.utils import retrieve_data
from GA.evolve_cnn.train import compile_model_cnn
from GA.evolve.train import compile_model_mlp
from GA.utils.utils import convert_to_individual_alleles
early_stopper = EarlyStopping(monitor='val_loss', min_delta=0.1, patience=2, verbose=0, mode='auto')
def CNN(traits=['height', 'BMI', 'WHR', 'BHMD', 'SBP'], verbose=0, unif=False, nbsnps=10000, p=None, reps=1):
#cnn1
param = [{'optimizer': 'nadam', 'size_window': 2, 'activation': 'softplus', 'nb_neurons': 64, 'stride': 'one',
'nb_cnn_layers': 1, 'filters': 16, 'weight_decay': 0.0, 'nb_layers': 3,
'dropout': 0.01, 'batch_norm': True}]
#cnn2
param.append({'optimizer': 'nadam', 'size_window': 2, 'activation': 'elu', 'nb_neurons': 32, 'stride': 'one',
'nb_cnn_layers': 1, 'filters': 32, 'weight_decay': 0.0, 'nb_layers': 3,
'dropout': 0.01, 'batch_norm': False})
#cnn3
param.append({'optimizer': 'rmsprop', 'size_window': 3, 'activation': 'linear', 'nb_neurons': 32, 'stride': 'one',
'nb_cnn_layers': 1, 'filters': 16, 'weight_decay': 0.0, 'nb_layers': 1,
'dropout': 0.01, 'batch_norm': False})
R = {}
for t in traits:
best = 0
print(t)
x_tr, x_tst, y_tr, y_tst = retrieve_data(t, nbsnps, unif=unif)
x_tr, x_val, y_tr, y_val = train_test_split(x_tr, y_tr, test_size=0.33)
n_snps = x_tr.shape[1]
x_tr = np.expand_dims(x_tr, axis=2)
x_val = np.expand_dims(x_val, axis=2)
x_tst = np.expand_dims(x_tst, axis=2)
f = os.path.join(os.path.expanduser("~"), 'Code/genomic_cnn/models', "Model_" + t + "_cnn_"
+ str(n_snps / 1000) + "k" + ("_unif" if unif else "_best") + ".h5")
n = 0
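# If p is None, every candidate architecture in param is trained reps times and the best one
# (by validation correlation) is saved to f; otherwise only param[p] is repeated reps times.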
if p is None:
res = np.zeros((len(param), 2))
for g in param:
print(g)
for x in range(0, reps):
m = compile_model_cnn(g, (n_snps, 1))
m.fit(x_tr, y_tr, epochs=1200, verbose=verbose, validation_data=(x_val, y_val),
callbacks = [early_stopper])
if r(m.predict(x_val).ravel(), y_val)[0] > res[n, 0]:
print(r(m.predict(x_val).ravel(), y_val)[0])
print(x)
res[n, 0] = r(m.predict(x_val).ravel(), y_val)[0]
res[n, 1] = r(m.predict(x_tst).ravel(), y_tst)[0]
if res[n, 0] > best:
print("A better network was found with r: %.3f" % res[n, 0])
print(g)
m.save(f)
best = res[n, 0]
n = n + 1
else:
res = np.zeros((reps, 2))
g = param[p]
for i in range(0, reps):
m = compile_model_cnn(g, (n_snps, 1))
m.fit(x_tr, y_tr, epochs=1200, verbose=verbose, validation_data=(x_val, y_val),callbacks=[early_stopper])
res[i, :] = (r(m.predict(x_val).ravel(), y_val)[0], r(m.predict(x_tst).ravel(), y_tst)[0])
R[t+"_tr"] = res[:, 0]
R[t+"_tst"] = res[:, 1]
print(pd.DataFrame(R).to_csv(float_format='%.3f', index=False))
logging.info(pd.DataFrame(R).to_csv(float_format='%.3f', index=False))
def MLP(traits=['height', 'BMI', 'WHR', 'BHMD', 'SBP'], verbose=0, unif=False, nbsnps=10000, p=None, reps=1, hot=False):
#mlp1
geneparam = [{'optimizer': 'rmsprop', 'activation': 'elu', 'nb_neurons': 32,
'weight_decay': 0.01, 'nb_layers': 1, 'dropout': 0.02}]
# mlp2
geneparam.append({'optimizer': 'adagrad', 'activation': 'elu', 'nb_neurons': 64, 'weight_decay': 0.01,
'nb_layers': 2, 'dropout': 0.03})
# mlp3
geneparam.append({'optimizer': 'adam', 'activation': 'softplus', 'nb_neurons': 32,
'weight_decay': 0.01, 'nb_layers': 5, 'dropout': 0.02})
R = {}
for t in traits:
print(t)
best = 0
x_tr, x_tst, y_tr, y_tst = retrieve_data(t, nbsnps, unif=unif)
x_tr, x_val, y_tr, y_val = train_test_split(x_tr, y_tr, test_size=0.33)
if hot:
x_tr = convert_to_individual_alleles(x_tr)
x_val = convert_to_individual_alleles(x_val)
x_tst = convert_to_individual_alleles(x_tst)
n_snps = x_tr.shape[1]
f = os.path.join(os.path.expanduser("~"), 'Code/genomic_cnn/models',
"Model_" + t + "_mlp_" + str(n_snps / 1000) \
+ "kHot" + ("_unif" if unif else "_best") + ".h5")
else:
n_snps = x_tr.shape[1]
f = os.path.join(os.path.expanduser("~"), 'Code/genomic_cnn/models', "Model_" + t + "_mlp_"
+ str(n_snps / 1000) + "k" + ("_unif" if unif else "_best") + ".h5")
n = 0
if p is None:
res = np.zeros((len(geneparam), 2))
for g in geneparam:
print(g)
for x in range(0, reps):
m = compile_model_mlp(g, n_snps)
m.fit(x_tr, y_tr, epochs=1200, validation_data=(x_val, y_val), callbacks=[early_stopper], verbose=verbose)
if r(m.predict(x_val).ravel(), y_val)[0] > res[n, 0]:
print(r(m.predict(x_val).ravel(), y_val)[0])
print(x)
res[n, 0] = r(m.predict(x_val).ravel(), y_val)[0]
res[n, 1] = r(m.predict(x_tst).ravel(), y_tst)[0]
if res[n, 0] > best:
print("A better network was found with r: %.3f" % res[n,0])
print(g)
m.save(f)
best = res[n, 0]
K.clear_session()
n = n + 1
else:
res = np.zeros((reps, 2))
g = geneparam[p]
for i in range(0, reps):
m = compile_model_mlp(g, n_snps)
m.fit(x_tr, y_tr, epochs=1200, verbose=verbose, validation_data=(x_val, y_val),
callbacks=[early_stopper])
res[i, :] = (r(m.predict(x_val).ravel(), y_val)[0], r(m.predict(x_tst).ravel(), y_tst)[0])
R[t + "_tr"] = res[:, 0]
R[t + "_tst"] = res[:, 1]
print(pd.DataFrame(R).to_csv(float_format='%.3f', index=False))
logging.info(pd.DataFrame(R).to_csv(float_format='%.3f', index=False))
def lin_models(lasso=True, traits=['height', 'BMI', 'WHR', 'BHMD', 'SBP'], nbsnps=10000,verbose=0, hot=False, unif=False, reps=1):
alpha = [0.01]
R = {}
for t in traits:
print(t)
x_tr, x_tst, y_tr, y_tst = retrieve_data(t, nbsnps, unif=unif)
x_tr, x_val, y_tr, y_val = train_test_split(x_tr, y_tr, test_size=0.33)
if hot:
x_tr = convert_to_individual_alleles(x_tr)
x_val = convert_to_individual_alleles(x_val)
x_tst = convert_to_individual_alleles(x_tst)
nb_snps = x_tr.shape[1]
res = np.zeros((len(alpha), 3))
n = 0
for a in alpha:
print(a)
for i in range(0,reps):
m = Sequential()
if lasso:
m.add(Dense(1, input_dim=nb_snps,kernel_regularizer=l1(a)))
else:
m.add(Dense(1, input_dim=nb_snps, kernel_regularizer=l2(a)))
m.compile(loss='mse', optimizer='adam')
m.fit(x_tr, y_tr, epochs=1000, callbacks=[EarlyStopping()], validation_data=(x_val, y_val), verbose=verbose)
if r(m.predict(x_val).ravel(), y_val)[0] > res[n, 0]:
print(r(m.predict(x_val).ravel(), y_val)[0])
print(i)
res[n, 0] = r(m.predict(x_val).ravel(), y_val)[0]
res[n, 1] = r(m.predict(x_tst).ravel(), y_tst)[0]
K.clear_session()
print(res[n, 1])
n += 1
R[t+"val"] = res[:, 0]
R[t+"tst"] = res[:, 1]
R["alpha"] = alpha
print( | pd.DataFrame(R) | pandas.DataFrame |
from os.path import join
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from src import utils as cutil
def convert_non_monotonic_to_nan(array):
"""Converts a numpy array to a monotonically increasing one.
Args:
array (numpy.ndarray [N,]): input array
Returns:
numpy.ndarray [N,]: some values marked as missing, all non-missing
values should be monotonically increasing
Usage:
>>> convert_non_monotonic_to_nan(np.array([0, 0, 5, 3, 4, 6, 3, 7, 6, 7, 8]))
np.array([ 0., 0., np.nan, 3., np.nan, np.nan, 3., np.nan, 6., 7., 8.])
"""
keep = np.arange(0, len(array))
is_monotonic = False
while not is_monotonic:
is_monotonic_array = np.hstack(
(array[keep][1:] >= array[keep][:-1], np.array(True))
)
is_monotonic = is_monotonic_array.all()
keep = keep[is_monotonic_array]
out_array = np.full_like(array.astype(float), np.nan)
out_array[keep] = array[keep]
return out_array
def log_interpolate(array):
"""Interpolates assuming log growth.
Args:
array (numpy.ndarray [N,]): input array with missing values
Returns:
numpy.ndarray [N,]: all missing values will be filled
Usage:
>>> log_interpolate(np.array([0, np.nan, 2, np.nan, 4, 6, np.nan, 7, 8]))
np.array([0, 0, 2, 3, 4, 6, 7, 7, 8])
"""
idx = np.arange(0, len(array))
log_array = np.log(array.astype(np.float32) + 1e-1)
interp_array = np.interp(
x=idx, xp=idx[~np.isnan(array)], fp=log_array[~np.isnan(array)]
)
return np.round(np.exp(interp_array)).astype(int)
DATA_CHINA = cutil.DATA_RAW / "china"
health_dxy_file = join(DATA_CHINA, "DXYArea.csv")
health_jan_file = join(DATA_CHINA, "china_city_health_jan.xlsx")
policy_file = join(DATA_CHINA, "CHN_policy_data_sources.csv")
pop_file = join(DATA_CHINA, "china_city_pop.csv")
output_file = cutil.DATA_PROCESSED / "adm2" / "CHN_processed.csv"
match_file = join(DATA_CHINA, "match_china_city_name_w_adm2.csv")
shp_file = cutil.DATA_INTERIM / "adm" / "adm2" / "adm2.shp"
end_date_file = cutil.CODE / "data" / "cutoff_dates.csv"
end_date = pd.read_csv(end_date_file)
(end_date,) = end_date.loc[end_date["tag"] == "default", "end_date"].values
end_date = str(end_date)
print("End Date: ", end_date)
## Load and clean pre 01/24 data
# load pre 01/24 data
df_jan = pd.read_excel(health_jan_file, sheet_name=None)
# process pre 1/24 data
df_jan_merged = pd.DataFrame(columns=["adm0_name", "adm1_name", "adm2_name", "date"])
for old_col, new_col in zip(
["confirmed", "death", "recovery"],
["cum_confirmed_cases", "cum_deaths", "cum_recoveries"],
):
melted = (
df_jan[old_col]
.melt(
id_vars=["adm0_name", "adm1_name", "adm2_name"],
var_name="date",
value_name=new_col,
)
.dropna()
)
df_jan_merged = pd.merge(
df_jan_merged,
melted,
how="outer",
on=["adm0_name", "adm1_name", "adm2_name", "date"],
)
df_jan_merged = df_jan_merged.loc[df_jan_merged["adm2_name"] != "Unknown", :]
## Load and clean main data (scraped), harmonize city names
# data downloaded from
# https://github.com/BlankerL/DXY-COVID-19-Data
df = pd.read_csv(health_dxy_file)
# drop aggregates and cases in other countries
df = df.loc[df["countryEnglishName"] == "China", :]
df = df.loc[df["cityName"].notna(), :]
# df.describe(include='all') # quick summary
# df['provinceName'].unique() # looks clean
# df['provinceEnglishName'].unique() # looks clean
# df['cityName'].unique() # looks messy, will keep raw data
# # check unique English name for obs with the same Chinese cityName
# for cn_name, group in df.groupby(['provinceName', 'cityName']):
# en_name = group['cityEnglishName'].unique()
# if len(en_name) > 1:
# print(cn_name)
# print(en_name)
# print(group['cityEnglishName'].shape)
# print(group['cityEnglishName'].value_counts())
# # check all english city names
# for en_name, _ in df.groupby(['provinceEnglishName', 'cityEnglishName']):
# print(en_name)
# # check all chinese city names
# for cn_name, _ in df.groupby(['provinceName', 'cityName']):
# print(cn_name)
# set and sort index
df = df.set_index(["provinceName", "cityName"]).sort_index()
# record notes
df.loc[:, "notes"] = np.nan
# recode city English names based on Chinese names
cityEnglishName_dict = {
# 'provinceName', 'cityName': 'cityEnglishName', 'assignedToCity'
# for prisons
("浙江省", "省十里丰监狱"): ("Shilifeng Prison", "prison"),
("山东省", "任城监狱"): ("Rencheng Prison", "prison"),
("湖北省", "监狱系统"): ("Prison", "prison"),
# for harmonizing names
("四川省", "凉山"): ("Liangshan Yi Autonomous Prefecture", np.nan),
("四川省", "凉山州"): ("Liangshan Yi Autonomous Prefecture", np.nan),
# for imported cases
(None, "境外输入人员"): ("International Imported Cases", "imported"),
(None, "外地来沪人员"): ("Domestic Imported Cases", "imported"),
(None, "武汉来京人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来京人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来津"): ("Domestic Imported Cases", "imported"),
(None, "外地来津人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来穗人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来粤人员"): ("Domestic Imported Cases", "imported"),
# for unknown
(None, "待明确地区"): ("Unknown", "unknown"),
(None, "未明确地区"): ("Unknown", "unknown"),
(None, "未知"): ("Unknown", "unknown"),
(None, "未知地区"): ("Unknown", "unknown"),
(None, "不明地区"): ("Unknown", "unknown"),
(None, "未明确地区"): ("Unknown", "unknown"),
(None, "待明确"): ("Unknown", "unknown"),
}
# clean up cityEnglishName
for cn_name, values in cityEnglishName_dict.items():
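# None in the key acts as a wildcard: it becomes slice(None), so the rule matches the city name in every province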
cn_name = tuple(slice(s) if s is None else s for s in cn_name)
df.loc[cn_name, ["cityEnglishName", "notes"]] = values
# # check remaining missing values
# df.loc[df['cityEnglishName'].isna(), :].index.unique().tolist()
# add new admin level
df.loc[:, "adm3_name"] = "N/A"
# recode city English names based on Chinese names
cityEnglishName_dict = {
("上海市", "金山"): "Jinshan District",
("云南省", "红河"): "Honghe",
("云南省", "西双版纳州"): "Xishuangbanna",
("内蒙古自治区", "赤峰市松山区"): ("Chifeng", "Songshan"),
("内蒙古自治区", "赤峰市林西县"): ("Chifeng", "Linxi"),
("内蒙古自治区", "通辽市经济开发区"): "Tongliao",
("内蒙古自治区", "鄂尔多斯东胜区"): ("Ordos", "Dongsheng"),
("内蒙古自治区", "鄂尔多斯鄂托克前旗"): ("Ordos", "Etuokeqianqi"),
("内蒙古自治区", "锡林郭勒"): "Xilingol League",
("内蒙古自治区", "锡林郭勒盟"): "Xilingol League",
("内蒙古自治区", "锡林郭勒盟二连浩特"): ("Xilingol League", "Erlianhaote"),
("内蒙古自治区", "锡林郭勒盟锡林浩特"): ("Xilingol League", "Xilinhaote"),
("北京市", "石景山"): "Shijingshan District",
("北京市", "西城"): "Xicheng District",
("北京市", "通州"): "Tongzhou District",
("北京市", "门头沟"): "Mentougou District",
("北京市", "顺义"): "Shunyi District",
(
"新疆维吾尔自治区",
"石河子",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
("新疆维吾尔自治区", "第七师"): "Xinjiang Production and Construction Corps 7th Division",
("新疆维吾尔自治区", "第九师"): "Xinjiang Production and Construction Corps 9th Division",
(
"新疆维吾尔自治区",
"第八师",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
(
"新疆维吾尔自治区",
"第八师石河子",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
(
"新疆维吾尔自治区",
"第八师石河子市",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
("新疆维吾尔自治区", "第六师"): "Xinjiang Production and Construction Corps 6th Division",
("新疆维吾尔自治区", "胡杨河"): (
"Xinjiang Production and Construction Corps 7th Division",
"Huyanghe",
),
("新疆维吾尔自治区", "阿克苏"): "Akesu",
("河北省", "邯郸市"): "Handan",
("河南省", "邓州"): "Zhengzhou",
("河南省", "长垣"): "Changyuan",
("河南省", "长垣县"): "Changyuan",
("河南省", "鹤壁市"): "Hebi",
("海南省", "陵水县"): "Lingshui Li Autonomous County",
("甘肃省", "白银市"): "Baiyin",
("甘肃省", "金昌市"): "Jinchang",
("重庆市", "石柱"): "Shizhu Tujia Autonomous County",
("重庆市", "秀山"): "Xiushan Tujia and Miao Autonomous County",
("重庆市", "酉阳"): "Youyang Tujia and Miao Autonomous County",
("青海省", "西宁市"): "Xining",
# this is not missing but a typo in the original dataset
("河南省", "邓州"): "Dengzhou",
("江苏省", "淮安"): "Huai'an",
}
# clean up cityEnglishName
for cn_name, values in cityEnglishName_dict.items():
if isinstance(values, str):
df.loc[cn_name, "cityEnglishName"] = values
elif len(values) == 2:
df.loc[cn_name, ["cityEnglishName", "adm3_name"]] = values
# rename variables
df.rename(
{
"provinceEnglishName": "adm1_name",
"cityEnglishName": "adm2_name",
"city_confirmedCount": "cum_confirmed_cases",
"city_deadCount": "cum_deaths",
"city_curedCount": "cum_recoveries",
},
axis=1,
inplace=True,
)
# extract dates
df.loc[:, "updateTime"] = pd.to_datetime(df["updateTime"])
df.loc[:, "date"] = df["updateTime"].dt.date
df.loc[:, "date"] = pd.to_datetime(df["date"])
# choose the latest observation in each day
df = df.sort_values(by=["updateTime"])
df = df.drop_duplicates(
subset=["adm1_name", "adm2_name", "adm3_name", "date"], keep="last"
)
# subset columns
df = df.loc[
:,
[
"adm1_name",
"adm2_name",
"adm3_name",
"date",
"notes",
"cum_confirmed_cases",
"cum_deaths",
"cum_recoveries",
],
]
# for big cities, adjust adm level
mask = df["adm1_name"].isin(["Shanghai", "Beijing", "Tianjin", "Chongqing"])
df.loc[mask, "adm3_name"] = df.loc[mask, "adm2_name"].tolist()
df.loc[mask, "adm2_name"] = df.loc[mask, "adm1_name"].tolist()
# drop cases unassigned to cities
df = df.loc[df["notes"] != "prison", :]
df = df.loc[
~df["adm2_name"].isin(
["International Imported Cases", "Domestic Imported Cases", "Unknown"]
),
:,
]
# aggregate to city level
df = (
df.groupby(["adm1_name", "adm2_name", "date"])
.agg(
cum_confirmed_cases=pd.NamedAgg(
column="cum_confirmed_cases", aggfunc=np.nansum
),
cum_deaths=pd.NamedAgg(column="cum_deaths", aggfunc=np.nansum),
cum_recoveries=pd.NamedAgg(column="cum_recoveries", aggfunc=np.nansum),
)
.reset_index()
)
# fill adm0_name variable
df.loc[:, "adm0_name"] = "CHN"
## Merge with pre 01/24 data, create balanced panel
# merge with pre 1/24 data
df = pd.concat([df, df_jan_merged], sort=False)
# create a balanced panel
adm = df.loc[:, ["adm0_name", "adm1_name", "adm2_name"]].drop_duplicates()
days = pd.date_range(start="20200110", end=end_date)
adm_days = pd.concat([adm.assign(date=d) for d in days])
print(f"Sample: {len(adm)} cities; {len(days)} days.")
df = pd.merge(
adm_days, df, how="left", on=["adm0_name", "adm1_name", "adm2_name", "date"]
)
# fill N/A for the first day
df.loc[df["date"] == pd.Timestamp("2020-01-10"), :] = df.loc[
df["date"] == pd.Timestamp("2020-01-10"), :
].fillna(0)
# forward fill
df = df.set_index(["adm0_name", "adm1_name", "adm2_name"]).sort_index()
for _, row in adm.iterrows():
df.loc[tuple(row), :] = df.loc[tuple(row), :].fillna(method="ffill")
## Load and clean policy data
# load dataset of the policies in China
df_policy = pd.read_csv(policy_file).dropna(how="all")
# subset columns
df_policy = df_policy.loc[
:, ["adm0_name", "adm1_name", "adm2_name", "date_start", "date_end", "policy"]
]
# save set of policies
policy_set = df_policy["policy"].unique().tolist()
# parse
df_policy.loc[:, "date_start"] = pd.to_datetime(df_policy["date_start"])
df_policy.loc[:, "date_end"] = pd.to_datetime(df_policy["date_end"])
# check city name agreement
policy_city_set = set(
df_policy.loc[:, ["adm0_name", "adm1_name", "adm2_name"]]
.drop_duplicates()
.apply(tuple, axis=1)
.tolist()
)
adm2_set = set(adm.drop_duplicates().apply(tuple, axis=1).tolist())
adm1_set = set(
adm.loc[:, ["adm0_name", "adm1_name"]]
.drop_duplicates()
.apply(lambda x: (*x, "All"), axis=1)
.tolist()
)
print("Mismatched: ", policy_city_set - (adm1_set | adm2_set))
# subset adm1 policies
adm1_policy = df_policy.loc[df_policy["adm2_name"] == "All", :]
# merge to create balanced panel
adm1_policy = pd.merge(
adm,
adm1_policy.drop(["adm2_name"], axis=1),
how="left",
on=["adm0_name", "adm1_name"],
).dropna(subset=["policy"])
print("no. of adm1 policies: ", adm1_policy.shape[0])
# subset adm2 policies
adm2_policy = df_policy.loc[df_policy["adm2_name"] != "All", :]
print("no. of adm2 policies: ", adm2_policy.shape[0])
# concat policies at different levels
df_policy = pd.concat([adm1_policy, adm2_policy])
# sort by date to discard duplicates
df_policy = df_policy.sort_values(by=["date_start"])
# drop duplicates
df_policy = df_policy.drop_duplicates(
subset=["adm1_name", "adm2_name", "policy"], keep="first"
)
df_policy_set = set(
df_policy.loc[:, ["adm0_name", "adm1_name", "adm2_name"]]
.drop_duplicates()
.apply(tuple, axis=1)
.tolist()
)
print("Cities without any policies: ", len(adm2_set - df_policy_set))
print(adm2_set - df_policy_set)
# unstack to flip policy type to columns
df_policy = df_policy.set_index(
["adm0_name", "adm1_name", "adm2_name", "policy"]
).unstack("policy")
# prepare to merge with multi index
adm_days.set_index(["adm0_name", "adm1_name", "adm2_name"], inplace=True)
adm_days.columns = pd.MultiIndex.from_tuples([("date", "")])
# merge to create balanced panel
df_policy = pd.merge(
adm_days, df_policy, how="left", on=["adm0_name", "adm1_name", "adm2_name"]
)
# fill N/As for dates
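# a policy never observed in a city gets a start date after the sample window (its dummy stays 0);
# an enacted policy with no recorded end date stays switched on through the end of the sample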
df_policy = df_policy.fillna(pd.Timestamp("2021-01-01"))
# convert to dummies
for policy in policy_set:
df_policy.loc[:, (policy, "")] = (
df_policy.loc[:, ("date", "")] >= df_policy.loc[:, ("date_start", policy)]
) & (df_policy.loc[:, ("date", "")] <= df_policy.loc[:, ("date_end", policy)])
# discard intermediate variables
df_policy = df_policy[["date"] + policy_set]
# flatten the column index
df_policy.columns = df_policy.columns.get_level_values(0)
# convert data type
df_policy.loc[:, policy_set] = df_policy.loc[:, policy_set].astype(int)
df = pd.merge(
df, df_policy, how="inner", on=["adm0_name", "adm1_name", "adm2_name", "date"]
)
## Merge with testing policies
# merge with testing policies
# source:
# https://english.kyodonews.net/news/2020/02/6982cc1e130f-china-records-2-straight-days-of-fewer-than-1000-new-covid-19-cases.html
# https://www.worldometers.info/coronavirus/how-to-interpret-feb-12-case-surge/
# https://www.medrxiv.org/content/10.1101/2020.03.23.20041319v1.full.pdf
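# each comparison adds 1 once the corresponding regime-change date has passed, so the value counts cumulative testing-regime changes (0-6)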
df.loc[:, "testing_regime"] = (
(df["date"] > pd.Timestamp("2020-01-17")).astype(int)
+ (df["date"] > pd.Timestamp("2020-01-27")).astype(int)
+ (df["date"] > pd.Timestamp("2020-02-05")).astype(int)
+ (df["date"] > pd.Timestamp("2020-02-12")).astype(int)
+ (df["date"] > pd.Timestamp("2020-02-19")).astype(int)
+ (df["date"] > pd.Timestamp("2020-03-04")).astype(int)
)
# df.describe(include='all') # looks fine
## Multiple sanity checks, Save
# drop/impute non monotonic observations
for col in ["cum_confirmed_cases", "cum_deaths", "cum_recoveries"]:
for _, row in adm.iterrows():
df.loc[tuple(row), col] = convert_non_monotonic_to_nan(
df.loc[tuple(row), col].values
)
df.loc[tuple(row), col + "_imputed"] = log_interpolate(
df.loc[tuple(row), col].values
)
# add city id
df = pd.merge(
df,
adm.assign(adm2_id=range(adm.shape[0])),
how="left",
on=["adm0_name", "adm1_name", "adm2_name"],
)
## merge on populations
chnpop = | pd.read_csv(pop_file, usecols=[1, 2, 3, 4], index_col=[0, 1, 2]) | pandas.read_csv |
from oauth2client.service_account import ServiceAccountCredentials
from apiclient.discovery import build
from httplib2 import Http
from io import StringIO
import pandas
import dv360_tools_helpers as dvt
import argparse
from distutils.util import strtobool
scopes = ['https://www.googleapis.com/auth/doubleclickbidmanager']
key = 'key.json'
credentials = ServiceAccountCredentials.from_json_keyfile_name(key, scopes=scopes)
http_auth = credentials.authorize(Http())
dbm = build('doubleclickbidmanager', 'v1', http=http_auth)
def get_template(io_id, write=False, write_name='template.csv'):
"""
Access template IO line items
Return as StringIO with option to write to file
:param io_id: template
:param write: bool; whether or not template gets written to file
:param write_name: string; name of template file name
:return: StringIO object;
"""
# API Request Information
body = {
"fileTypes": ["LINE_ITEM"], "filterType": "INSERTION_ORDER_ID", "filterIds": [io_id], "version": "3.1", }
request = dbm.sdf().download(body=body).execute()
if write is True:
with open(write_name, 'w') as f:
f.write(request['lineItems'])
f.close()
buffer = StringIO(request['lineItems'])
return buffer
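# Example use (hypothetical insertion order id):
# buffer = get_template('12345678', write=True, write_name='io_12345678_template.csv')
# template_df = pandas.read_csv(buffer)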
def write_sdf(template, modifier, io_id, audience, geo, trow):
"""
:param template: string; DV360 template Insertion Order ID
:param modifier: string; modifier file name; should be included in the same directory as this file
:param io_id: string; id of the insertion order you plan to upload the SDF to
:param trow: integer; specific row to be used as template
:param audience: bool; are you manipulating audience targeting - mutually exclusive w/ geo
:param geo: bool; are you manipulating geography targeting - mutually exclusive w/ audience
:return: None
"""
sdf_out = pandas.read_csv(template)
mod = pandas.read_csv(modifier, skipinitialspace=True, encoding='latin1')
sdf_out = | pandas.DataFrame(sdf_out.iloc[[trow]]) | pandas.DataFrame |
import pandas as pd
import pyodbc
from queue import Queue
from threading import Thread
import math
from distutils.util import strtobool
import shutil
import os
class TruncationException(Exception):
pass
class NullValueException(Exception):
pass
class MissingColumnsException(Exception):
pass
class SQLException(Exception):
pass
def task(table_name, schema, cnxn_string, df_queue, columns, ignore_truncation, cursors, exceptions):
column_select_list = ', '.join(columns['SELECT_SAFE_COLUMN_NAME'])
column_specification = '(' + column_select_list + ')'
cnxn = pyodbc.connect(cnxn_string)
crsr = cnxn.cursor()
if ignore_truncation:
crsr.execute('SET ANSI_WARNINGS OFF')
# Create temp tables based on target table definition
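# SELECT ... INTO #TEMP with WHERE 1=0 copies the target table's column definitions without copying any rows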
temp_table_statement = f'SELECT {column_select_list} INTO #TEMP FROM {schema}.{table_name} WHERE 1=0'
crsr.execute(temp_table_statement)
crsr.commit()
# generate and execute insertion statements
try:
while not df_queue.empty():
df = df_queue.get()
insert_statement = f'INSERT INTO #TEMP {column_specification} VALUES\n'
for i, row in df.iterrows():
insert_line = '('
for column, data_type in zip(columns['COLUMN_NAME'], columns['DATA_TYPE']):
# Handle NULLs first
if | pd.isnull(row[column]) | pandas.isnull |
# https://colab.research.google.com/notebooks/mlcc/first_steps_with_tensor_flow.ipynb
from __future__ import print_function
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
from utils.input_fn import my_input_fn
dirname = os.path.dirname(__file__)
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
csv = os.path.join(dirname, '../datasets/california_housing_train.csv')
california_housing_dataframe = | pd.read_csv(csv, sep=",") | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
labels = ['Initial y offset', 'Final x error', 'Final y error']
data = [
[0, 0, 0.25],
[0, -0.05, 0.15],
[0, -0.05, 0.10],
[0.5, -.1, .2],
[0.5, -.1, .2],
[0.5, -.1, .25],
[-0.5, -.05, -.05],
[-0.5, -.1, -.1],
[-0.5, np.nan, np.nan],
[1.5, -.45, .3],
[1.5, -.5, .7],
[1.5, -.45, .5]
]
df = | pd.DataFrame(data, columns=labels, dtype=float) | pandas.DataFrame |
import pandas as pd
import time
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
data = pd.read_csv('new3.csv',encoding='gbk')
# drop the data we don't use
del data['之前时刻']
# convert the time difference between the two timestamps into seconds
data['差值'] = data['差值'].apply(lambda x: int(x[7:9])*3600+int(x[10:12])*60+int(x[13:15]))
# current plan: write the filled-in data to a new csv, keeping only speed and acceleration for now (acceleration is computed later)
out = | pd.DataFrame([],columns=['车速','加速度','发动机']) | pandas.DataFrame |
from glob import glob
import os
import pandas as pd
import json
from matplotlib import pyplot as plt
import numpy as np
import clifford
def load_results(path):
with open(path, "r", encoding="utf-8") as result_file:
data = json.load(result_file)
file_name = os.path.splitext(os.path.basename(path))[0]
# file name: <lib>_<function>_<elements>
lib_name, fn_name, num_elements = file_name.split("_")
num_elements = int(num_elements)
benchmarks = data["benchmarks"]
assert len(benchmarks) == 1
benchmark = benchmarks[0]
mean, stddev = benchmark["stats"]["mean"], benchmark["stats"]["stddev"]
return {
"lib_name": lib_name,
"fn_name": fn_name,
"num_elements": num_elements,
"mean": mean,
"stddev": stddev
}
def main():
result_paths = sorted(glob(os.path.join("results", "*.json")))
out_path = "output"
os.makedirs(out_path, exist_ok=True)
all_results = list(map(load_results, result_paths))
df = | pd.DataFrame(all_results) | pandas.DataFrame |
import pandas as pd
import re
import numpy as np
def evaluate():
ground_truth = pd.read_csv('data/ground_truth.csv', index_col=False)
results = | pd.read_csv('results.csv', low_memory=False) | pandas.read_csv |
from collections import defaultdict
import json
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
import tenkit
from tenkit_tools.utils import load_best_group
from sklearn.metrics import roc_auc_score
import h5py
RUN_FOLDER = Path("201128_noise_05_20_30_40")
LOG_FOLDER = RUN_FOLDER/"results"
NUM_EXPERIMENTS = 50
MAX_ITS = 1000
TIME_NAME = 'time'
EXPERIMENT_NAMES = [
"double_split",
"single_split_Dk",
# "double_split_NOPF2"
"flexible_coupling"
]
def load_logs(log_folder: Path, experiment_num: int) -> dict:
logs = {}
for experiment_name in EXPERIMENT_NAMES:
with (log_folder/f"{experiment_name}_{experiment_num:03d}.json").open() as f:
logs[experiment_name] = json.load(f)
return logs
#with (log_folder/f"single_split_C_{experiment_num:03d}.json").open() as f:
# single_split_C_results = json.load(f)
#with (log_folder/f"single_split_Dk_{experiment_num:03d}.json").open() as f:
# single_split_Dk_results = json.load(f)
#with (log_folder/f"flexible_coupling_{experiment_num:03d}.json").open() as f:
# flexible_coupling_results = json.load(f)
#return double_split_results, single_split_Dk_results
#return double_split_results, single_split_C_results, single_split_Dk_results, flexible_coupling_results
def load_double_split_logs(log_folder: Path, experiment_num: int) -> dict:
with (log_folder/f"double_split_{experiment_num:03d}.json").open() as f:
double_split_results = json.load(f)
return double_split_results,
def load_checkpoint(log_folder: Path, experiment_name: str, experiment_num: int) -> tenkit.decomposition.EvolvingTensor:
checkpoint_folder = log_folder/"checkpoints"
EvolvingTensor = tenkit.decomposition.EvolvingTensor
with h5py.File(checkpoint_folder/f"{experiment_name}_{experiment_num:03d}.h5", "r") as h5:
group = load_best_group(h5)
estimated = EvolvingTensor.load_from_hdf5_group(group)
return estimated
def load_checkpoints(log_folder: Path, experiment_num: int) -> list:
return {experiment_name: load_checkpoint(log_folder, experiment_name, experiment_num)
for experiment_name in EXPERIMENT_NAMES}
def load_decomposition(log_folder: Path, experiment_num: int) -> tenkit.decomposition.EvolvingTensor:
checkpoint_folder = log_folder/"decompositions"
EvolvingTensor = tenkit.decomposition.EvolvingTensor
with h5py.File(checkpoint_folder/f"{experiment_num:03d}.h5", "r") as h5:
estimated = EvolvingTensor.load_from_hdf5_group(h5["evolving_tensor"])
return estimated
def compute_accuracies(log_folder: Path, progress=False) -> dict:
accuracies = {}
if progress:
range_ = trange
else:
range_ = range
for experiment_num in range_(NUM_EXPERIMENTS):
checkpoints = load_checkpoints(log_folder, experiment_num)
true = load_decomposition(log_folder, experiment_num)
for name, decomposition in checkpoints.items():
if name not in accuracies:
accuracies[name] = {
'Sensitivity': [],
'Specificity': [],
'Dice': [],
'ROC AUC': [],
}
# Binarize
B = np.array(decomposition.B)
B /= np.linalg.norm(B, axis=1, keepdims=True)
estimated_map = abs(B) > 1e-8
true_map = np.array(true.B) > 1e-8
# Compute metrics
accuracies[name]['Sensitivity'].append(np.sum(estimated_map*true_map) / np.sum(true_map))
accuracies[name]['Specificity'].append(np.sum((1 - estimated_map)*(1 - true_map)) / np.sum(1 - true_map))
accuracies[name]['Dice'].append(2*np.sum(estimated_map*true_map) / (np.sum(true_map) + np.sum(estimated_map)))
accuracies[name]['ROC AUC'].append(roc_auc_score(true_map.ravel().astype(int), B.ravel()))
return accuracies
def create_summaries(experiment_log: dict) -> dict:
"""Takes a single result dict as input and creates a summary.
Summary just contains the logs for the final iteration.
"""
summary = {}
for key, value in experiment_log.items():
summary[key] = value[-1]
return summary
def load_summaries(log_folder, num_experiments: int) -> (dict, dict):
"""Take number of experiments as input and return two dicts, one for logs and one for summaries.
The keys of these dicts are the experiment types (e.g. double_split) and the values are dictionaries of lists.
The keys of the inner dictionaries are log-types (e.g. fms) and the values are lists.
The i-th element of these lists are the logs and summaries for the i-th experiment.
"""
logs = {
experiment_name: defaultdict(list) for experiment_name in EXPERIMENT_NAMES
}
summaries = {
experiment_name: defaultdict(list) for experiment_name in EXPERIMENT_NAMES
}
for i in range(num_experiments):
for experiment_name, log in load_logs(log_folder, i).items():
for key, value in log.items():
logs[experiment_name][key].append(value)
for experiment_name, log in load_logs(log_folder, i).items():
summary = create_summaries(log)
for key, value in summary.items():
summaries[experiment_name][key].append(value)
logs = {key: dict(value) for key, value in logs.items()}
summaries = {key: dict(value) for key, value in summaries.items()}
return logs, summaries
def load_double_split_summaries(log_folder, num_experiments: int) -> (dict, dict):
"""Take number of experiments as input and return two dicts, one for logs and one for summaries.
The keys of these dicts are the experiment types (e.g. double_split) and the values are dictionaries of lists.
The keys of the inner dictionaries are log-types (e.g. fms) and the values are lists.
The i-th element of these lists are the logs and summaries for the i-th experiment.
"""
experiment_names = (
'double_split',
)
logs = {
experiment_name: defaultdict(list) for experiment_name in experiment_names
}
summaries = {
experiment_name: defaultdict(list) for experiment_name in experiment_names
}
for i in range(num_experiments):
for experiment_name, log in zip(experiment_names, load_double_split_logs(log_folder, i)):
for key, value in log.items():
logs[experiment_name][key].append(value)
for experiment_name, log in zip(experiment_names, load_double_split_logs(log_folder, i)):
summary = create_summaries(log)
for key, value in summary.items():
summaries[experiment_name][key].append(value)
return logs, summaries
def make_log_array(log_list: list) -> np.array:
"""Takes uneven list of logs and creates a 2D numpy array where the last element of each list is used as padding.
"""
log_array = np.zeros((NUM_EXPERIMENTS, MAX_ITS))
for i, log in enumerate(log_list):
num_its = len(log)
log_array[i, :num_its] = log
log_array[i, num_its:] = log[-1]
return log_array
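# e.g. logs of lengths 3 and 5 both become rows of length MAX_ITS,
# each padded to the right with its own final value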
def make_summary_df(summaries):
"""Convert nested dictionary of summaries (inner dicts represent summaries for one method) into a single dataframe.
"""
summary_dfs = {method: | pd.DataFrame(summary) | pandas.DataFrame |
"""Module with the tests for the pileup creation realted tasks."""
import os
import unittest
from unittest.mock import patch
from unittest.mock import MagicMock, PropertyMock
import pandas as pd
from pandas.testing import assert_frame_equal
import numpy as np
from hicognition.test_helpers import LoginTestCase, TempDirTestCase
from hicognition.utils import get_optimal_binsize
# add path to import app
# import sys
# sys.path.append("./")
from app import db
from app.models import Dataset, Intervals, Assembly, Task, ObsExp
from app.tasks import pipeline_pileup
from app.pipeline_steps import pileup_pipeline_step
from app.pipeline_worker_functions import (
_do_pileup_fixed_size,
_do_pileup_variable_size,
)
class TestPipelinePileup(LoginTestCase, TempDirTestCase):
"""Tests whether pipelin_pileup task calls
the pipeline steps correctly"""
def setUp(self):
"""Add test dataset"""
# call setUp of LoginTestCase to initialize app
super(TestPipelinePileup, self).setUp()
# add assembly
self.hg19 = Assembly(
id=1,
name="hg19",
chrom_sizes=self.app.config["CHROM_SIZES"],
chrom_arms=self.app.config["CHROM_ARMS"],
)
db.session.add(self.hg19)
db.session.commit()
# add dataset
self.bedfile = Dataset(id=1, filetype="bedfile", user_id=1, assembly=1)
self.coolerfile = Dataset(id=2, filetype="cooler", user_id=1, assembly=1)
# add intervals
self.intervals1 = Intervals(
id=1, name="testRegion1", dataset_id=1, windowsize=200000
)
self.intervals2 = Intervals(
id=2, name="testRegion2", dataset_id=1, windowsize=200000
)
# make tasks
self.finished_task1 = Task(
id="test1", dataset_id=2, intervals_id=1, complete=True
)
self.unfinished_task1 = Task(
id="test1", dataset_id=2, intervals_id=1, complete=False
)
@staticmethod
def get_call_args_without_index(mock, remove_index):
"""extracts call args from magic mock object and removes
object at index"""
call_args = []
for call in mock.call_args_list:
current_call_list = []
for index in range(len(call[0])):
if index == remove_index:
# remove chromosome arms dataframe
continue
current_call_list.append(call[0][index])
call_args.append(current_call_list)
return call_args
@patch("app.pipeline_steps.set_task_progress")
@patch("app.pipeline_steps.pileup_pipeline_step")
def test_pipeline_pileup_calls_steps_correctly(
self, mock_pileup_pipeline_step, mock_set_progress
):
"""Tests whether the functions that execute the different pipeline steps are called
correctly."""
# add datasets
db.session.add_all(
[self.coolerfile, self.bedfile, self.intervals1, self.intervals2]
)
db.session.commit()
# launch task
binsize = 10000
dataset_id = 2
intervals_id = 2
pileup_types = ["ICCF", "Obs/Exp"]
pipeline_pileup(dataset_id, intervals_id, binsize)
# construct call arguments, pd.dataframe breaks magicmocks interval methods
call_args = self.get_call_args_without_index(mock_pileup_pipeline_step, 3)
# compare expected call arguments with actual call arguments
for pileup_type in pileup_types:
# check whether the current combination is in call args list
expected_call_args = [dataset_id, intervals_id, binsize, pileup_type]
self.assertTrue(expected_call_args in call_args)
# check whether number of calls was as expected
self.assertEqual(len(call_args), len(pileup_types))
# check whether last call to set task progress was 100
mock_set_progress.assert_called_with(100)
@patch("app.pipeline_steps.pd.read_csv")
@patch("app.pipeline_steps.set_task_progress")
@patch("app.pipeline_steps.pileup_pipeline_step")
def test_dataset_state_not_changed_if_not_last(
self, mock_pileup, mock_set_progress, mock_read_csv
):
"""tests whether dataset state is left unchanged if it is not the last task for
this dataset/intervals combination."""
# set up database
self.bedfile.processing_features = [self.coolerfile]
db.session.add_all(
[self.bedfile, self.coolerfile, self.intervals1, self.unfinished_task1]
)
# call pipeline
pipeline_pileup(2, 1, 10000)
# check whether processing has finished
self.assertEqual(self.bedfile.processing_features, [self.coolerfile])
@patch("app.pipeline_steps.pd.read_csv")
@patch("app.pipeline_steps.set_task_progress")
@patch("app.pipeline_steps.pileup_pipeline_step")
def test_dataset_set_finished_if_last(
self, mock_pileup, mock_set_progress, mock_read_csv
):
"""tests whether dataset is set finished correctly if it is the last task for
this dataset/intervals combination."""
# set up database
self.bedfile.processing_features = [self.coolerfile]
db.session.add_all(
[self.bedfile, self.coolerfile, self.intervals1, self.finished_task1]
)
# call pipeline
pipeline_pileup(2, 1, 10000)
# check whether processing has finished
self.assertEqual(self.bedfile.processing_features, [])
@patch("app.pipeline_steps.log.error")
@patch("app.pipeline_steps.pd.read_csv")
@patch("app.pipeline_steps.set_task_progress")
@patch("app.pipeline_steps.pileup_pipeline_step")
def test_dataset_set_failed_if_failed(
self, mock_pileup, mock_set_progress, mock_read_csv, mock_log
):
"""tests whether dataset is set as faild if problem arises."""
# set up exception raising
mock_pileup.side_effect = ValueError("Test")
# set up database
self.bedfile.processing_features = [self.coolerfile]
db.session.add_all(
[self.bedfile, self.coolerfile, self.intervals1, self.unfinished_task1]
)
# call pipeline
pipeline_pileup(2, 1, 10000)
# check whether processing has finished
self.assertEqual(self.bedfile.failed_features, [self.coolerfile])
self.assertEqual(self.bedfile.processing_features, [])
assert mock_log.called
class TestPileupPipelineStep(LoginTestCase, TempDirTestCase):
"""Test pileup worker functions for point and interval features."""
def setUp(self):
"""Add test dataset"""
# call setUp of LoginTestCase to initialize app
super().setUp()
# add assembly
self.hg19 = Assembly(
id=1,
name="hg19",
chrom_sizes=self.app.config["CHROM_SIZES"],
chrom_arms=self.app.config["CHROM_ARMS"],
)
db.session.add(self.hg19)
db.session.commit()
# add dataset
self.dataset = Dataset(
dataset_name="test3",
file_path="/test/path/test3.mcool",
filetype="cooler",
processing_state="finished",
user_id=1,
assembly=1,
)
self.dataset2 = Dataset(
dataset_name="test4",
file_path="/test/path/test4.mcool",
filetype="cooler",
processing_state="finished",
user_id=1,
assembly=1,
)
# add intervals
self.intervals1 = Intervals(name="testRegion1", dataset_id=1, windowsize=200000)
self.intervals2 = Intervals(name="testRegion2", dataset_id=1, windowsize=300000)
self.intervals3 = Intervals(name="testRegion2", dataset_id=1, windowsize=None)
db.session.add(self.dataset)
db.session.add(self.dataset2)
db.session.add(self.intervals1)
db.session.add(self.intervals2)
db.session.add(self.intervals3)
db.session.commit()
@patch("app.pipeline_steps.worker_funcs._do_pileup_fixed_size")
@patch("app.pipeline_steps.worker_funcs._do_pileup_variable_size")
def test_correct_pileup_worker_function_used_point_feature(
self, mock_pileup_variable_size, mock_pileup_fixed_size
):
"""Tests whether correct worker function for pileup is used
when intervals has fixed windowsizes"""
# add return values
mock_pileup_fixed_size.return_value = np.full((2, 2, 2), np.nan)
# dispatch call
dataset_id = 1
intervals_id = 1
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
pileup_pipeline_step(dataset_id, intervals_id, 10000, arms, "ICCF")
# check whether pileup with fixed size was called and with variable size was not called
mock_pileup_fixed_size.assert_called_once()
mock_pileup_variable_size.assert_not_called()
@patch("app.pipeline_steps.worker_funcs._do_pileup_fixed_size")
@patch("app.pipeline_steps.worker_funcs._do_pileup_variable_size")
def test_correct_pileup_worker_function_used_interval_feature(
self, mock_pileup_variable_size, mock_pileup_fixed_size
):
"""Tests whether correct worker function for pileup is used
when intervals has variable windowsizes"""
# add return values
mock_pileup_variable_size.return_value = np.full((2, 2, 2), np.nan)
# dispatch call
dataset_id = 1
intervals_id = 3
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
pileup_pipeline_step(dataset_id, intervals_id, 10000, arms, "ICCF")
# check whether pileup with fixed size was called and with variable size was not called
mock_pileup_variable_size.assert_called_once()
mock_pileup_fixed_size.assert_not_called()
@patch("app.pipeline_steps.uuid.uuid4")
@patch("app.pipeline_steps.worker_funcs._add_embedding_2d_to_db")
@patch("app.pipeline_steps.worker_funcs._add_pileup_db")
@patch("app.pipeline_steps.worker_funcs._do_pileup_fixed_size")
@patch("app.pipeline_steps.worker_funcs._do_pileup_variable_size")
def test_adding_to_db_called_correctly(
self,
mock_pileup_variable_size,
mock_pileup_fixed_size,
mock_add_pileup_db,
mock_add_embedding_db,
mock_uuid,
):
"""Tests whether function to add result to database is called correctly."""
# add return values
mock_pileup_fixed_size.return_value = np.full((2, 2, 2), np.nan)
# hack in return value of uuid4().hex to be asdf
uuid4 = MagicMock()
type(uuid4).hex = PropertyMock(return_value="asdf")
mock_uuid.return_value = uuid4
# construct call args
dataset_id = 1
intervals_id = 1
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
pileup_pipeline_step(dataset_id, intervals_id, 10000, arms, "ICCF")
# check whether adding to pileup db is called correctly
mock_add_pileup_db.assert_called_with(
self.app.config["UPLOAD_DIR"] + "/asdf.npy",
10000,
self.intervals1.id,
self.dataset.id,
"ICCF",
)
# check whether adding embedding to db is called correctly
mock_add_embedding_db.assert_any_call(
{
"embedding": self.app.config["UPLOAD_DIR"] + "/asdf_embedding.npy",
"cluster_ids": self.app.config["UPLOAD_DIR"]
+ "/asdf_cluster_ids_small.npy",
"thumbnails": self.app.config["UPLOAD_DIR"]
+ "/asdf_thumbnails_small.npy",
},
10000,
self.intervals1.id,
self.dataset.id,
"ICCF",
"small",
)
mock_add_embedding_db.assert_any_call(
{
"embedding": self.app.config["UPLOAD_DIR"] + "/asdf_embedding.npy",
"cluster_ids": self.app.config["UPLOAD_DIR"]
+ "/asdf_cluster_ids_large.npy",
"thumbnails": self.app.config["UPLOAD_DIR"]
+ "/asdf_thumbnails_large.npy",
},
10000,
self.intervals1.id,
self.dataset.id,
"ICCF",
"large",
)
class TestPileupWorkerFunctionsFixedSize(LoginTestCase, TempDirTestCase):
"""Test pileup worker functions for fixed sized intervals."""
def setUp(self):
"""Add test dataset"""
# call setUp of LoginTestCase to initialize app
super().setUp()
# add assembly
self.hg19 = Assembly(
id=1,
name="hg19",
chrom_sizes=self.app.config["CHROM_SIZES"],
chrom_arms=self.app.config["CHROM_ARMS"],
)
db.session.add(self.hg19)
db.session.commit()
# add dataset
self.cooler = Dataset(
dataset_name="test3",
file_path="./tests/testfiles/test.mcool",
filetype="cooler",
processing_state="finished",
user_id=1,
assembly=1,
)
db.session.add(self.cooler)
db.session.commit()
@patch("app.pipeline_worker_functions.HT.do_pileup_iccf")
@patch("app.pipeline_worker_functions.HT.do_pileup_obs_exp")
@patch("app.pipeline_worker_functions.HT.get_expected")
@patch("app.pipeline_worker_functions.HT.assign_regions")
@patch("app.pipeline_worker_functions.cooler.Cooler")
@patch("app.pipeline_worker_functions.pd.read_csv")
def test_correct_functions_called_obs_exp(
self,
mock_read_csv,
mock_cooler,
mock_assign_regions,
mock_get_expected,
mock_pileup_obs_exp,
mock_pileup_iccf,
):
"""Tests whether correct pileup function is called when obs/exp pileup is dispatched"""
test_df_interval = pd.DataFrame(
{0: ["chr1", "chr1"], 1: [0, 1000], 2: [1000, 2000]}
)
mock_read_csv.return_value = test_df_interval
mock_cooler.return_value = "mock_cooler"
returned_regions = MagicMock()
mock_assign_regions.return_value = returned_regions
mock_get_expected.return_value = pd.DataFrame()
# dispatch call
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
_do_pileup_fixed_size(self.cooler, 100000, 10000, "testpath", arms, "Obs/Exp")
# check whether get_expected was called
mock_get_expected.assert_called()
mock_pileup_obs_exp.assert_called()
# check whether iccf pileup is not called
mock_pileup_iccf.assert_not_called()
@patch("app.pipeline_worker_functions.HT.do_pileup_iccf")
@patch("app.pipeline_worker_functions.HT.do_pileup_obs_exp")
@patch("app.pipeline_worker_functions.HT.get_expected")
@patch("app.pipeline_worker_functions.HT.assign_regions")
@patch("app.pipeline_worker_functions.cooler.Cooler")
@patch("app.pipeline_worker_functions.pd.read_csv")
def test_correct_functions_called_iccf(
self,
mock_read_csv,
mock_cooler,
mock_assign_regions,
mock_get_expected,
mock_pileup_obs_exp,
mock_pileup_iccf,
):
"""Tests whether correct pileup function is called when iccf pileup is dispatched"""
test_df_interval = pd.DataFrame(
{0: ["chr1", "chr1"], 1: [0, 1000], 2: [1000, 2000]}
)
mock_read_csv.return_value = test_df_interval
mock_cooler.return_value = "mock_cooler"
returned_regions = MagicMock()
mock_assign_regions.return_value = returned_regions
mock_get_expected.return_value = "expected"
# dispatch call
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
_do_pileup_fixed_size(self.cooler, 100000, 10000, "testpath", arms, "ICCF")
# check whether get_expected was called
mock_get_expected.assert_not_called()
expected_pileup_call = ["mock_cooler", returned_regions.dropna()]
mock_pileup_iccf.assert_called_with(
*expected_pileup_call, proc=2, collapse=True
)
# check whether iccf pileup is not called
mock_pileup_obs_exp.assert_not_called()
def test_regions_with_bad_chromosomes_filled_with_nan(self):
"""Checks whether regions with bad chromosomes are filled with nans"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
with patch("app.pipeline_worker_functions.pd.read_csv") as mock_read_csv:
test_df_interval = pd.DataFrame(
{
0: ["chr1", "chrASDF", "chr1", "chrASDF"],
1: [60000000, 10, 50000000, 100],
2: [60000000, 10, 50000000, 150],
}
)
mock_read_csv.return_value = test_df_interval
# dispatch call
result = _do_pileup_fixed_size(
self.cooler, 10000000, 5000000, "testpath", arms, "ICCF", collapse=False
)
self.assertEqual(result.shape[2], 4)
test_array_one = np.array(
[
[0.13717751, 0.0265284, 0.01462106, 0.009942, 0.00682112],
[0.0265284, 0.18850834, 0.06237434, 0.0145492, 0.01485787],
[0.01462106, 0.06237434, 0.119365, 0.04225391, 0.01654861],
[0.009942, 0.0145492, 0.04225391, 0.12408607, 0.05381814],
[0.00682112, 0.01485787, 0.01654861, 0.05381814, 0.14363506],
]
)
test_array_two = np.array(
[
[0.23130276, 0.02327701, 0.0126868, 0.00436247, 0.00401918],
[0.02327701, 0.15173886, 0.07788348, 0.01425616, 0.01083477],
[0.0126868, 0.07788348, 0.13717751, 0.0265284, 0.01462106],
[0.00436247, 0.01425616, 0.0265284, 0.18850834, 0.06237434],
[0.00401918, 0.01083477, 0.01462106, 0.06237434, 0.119365],
]
)
self.assertTrue(np.allclose(result[..., 0], test_array_one))
self.assertTrue(np.all(np.isnan(result[..., 1])))
self.assertTrue(np.allclose(result[..., 2], test_array_two))
self.assertTrue(np.all(np.isnan(result[..., 3])))
def test_cooler_w_missing_resolutions_return_nans_w_collapse(self):
"""Tests whether calling pileup on a cooler with
a binsize that is not available returns an array of nans
with the right shape according to windowsize and binsize"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
with patch("app.pipeline_worker_functions.pd.read_csv") as mock_read_csv:
test_df_interval = pd.DataFrame(
{
0: ["chr1"],
1: [60000000],
2: [60000000],
}
)
mock_read_csv.return_value = test_df_interval
# dispatch call
result = _do_pileup_fixed_size(
self.cooler, 200000, 10000, "testpath", arms, "ICCF", collapse=True
)
self.assertEqual(result.shape, (40, 40))
self.assertTrue(np.all(np.isnan(result)))
def test_cooler_w_missing_resolutions_return_nans_wo_collapse(self):
"""Tests whether calling pileup on a cooler with
a binsize that is not available returns an array of nans
with the right shape according to windowsize and binsize"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
with patch("app.pipeline_worker_functions.pd.read_csv") as mock_read_csv:
test_df_interval = pd.DataFrame(
{
0: ["chr1", "chr1"],
1: [60000000, 10000],
2: [60000000, 10000],
}
)
mock_read_csv.return_value = test_df_interval
# dispatch call
result = _do_pileup_fixed_size(
self.cooler, 200000, 10000, "testpath", arms, "ICCF", collapse=False
)
self.assertEqual(result.shape, (40, 40, 2))
self.assertTrue(np.all(np.isnan(result)))
@patch("app.pipeline_worker_functions.HT.get_expected")
def test_cached_obs_exp_used(self, mock_expected):
"""Tests whether cached obs/exp dataset is used"""
arms = pd.read_csv(self.app.config["CHROM_ARMS"])
obs_exp = ObsExp(
dataset_id=self.cooler.id,
binsize=5000000,
filepath=os.path.join("./tests/testfiles", "expected.csv"),
)
db.session.add(obs_exp)
db.session.commit()
# create mock regions
test_df_interval = pd.DataFrame(
{
0: ["chr1", "chr1"],
1: [60000000, 10000],
2: [60000000, 10000],
}
)
mock_path = os.path.join(self.app.config["UPLOAD_DIR"], "mock_regions.csv")
test_df_interval.to_csv(mock_path, index=False, header=None, sep="\t")
# dispatch call
_do_pileup_fixed_size(
self.cooler, 10000000, 5000000, mock_path, arms, "Obs/Exp", collapse=False
)
mock_expected.assert_not_called()
def test_calculated_obs_exp_cached(self):
"""Tests whether cached obs/exp dataset is created if it does not exist already"""
arms = | pd.read_csv(self.app.config["CHROM_ARMS"]) | pandas.read_csv |
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
def delete_first_line(path5):
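# re-writes the csv without its header row and returns the path of the new file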
dataframe = pd.read_csv(path5, sep=",", header=0)
df = dataframe.to_csv(path5.replace('.csv', '_no_header.csv'), index=None, header=None)
return path5.replace('.csv', '_no_header.csv')
def reindex(path3):
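# shuffles all rows and writes them back without a header; returns the path of the new file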
whole_dataframe = pd.read_csv(path3, sep=",")
whole_dataframe = whole_dataframe.sample(frac=1)
df = whole_dataframe.to_csv(path3.replace('.csv', '_reindexed.csv'), index=None,header=None)
return path3.replace('.csv', '_reindexed.csv')
class data_handling:
def __init__(self, path1=None,path2=None,path3=None):
self.path1 = path1
self.path2 = path2
self.path3 = path3
######## handling KDD99 raw datas
def handle_kdd99(self):
kdd99_dataframe = pd.read_csv(self.path1,sep=",")
#adding title for process
kdd99_dataframe.columns = ["duration","protocol_type","service","flag","src_bytes","dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins", "logged_in","num_compromised","root_shell","su_attempted","num_root","num_file_creations","num_shells","num_access_files","num_outbound_cmds","is_hot_login","is_guest_login","count","srv_count","serror_rate","srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate","diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count","dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate","dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate","dst_host_rerror_rate","dst_host_srv_rerror_rate","label"]
#remove kdd redundant column
kdd99_dataframe = kdd99_dataframe.drop(["hot","num_failed_logins", "logged_in","num_compromised","root_shell","su_attempted","num_root","num_file_creations","num_shells","num_access_files","num_outbound_cmds","is_hot_login","is_guest_login"],axis=1)
# change label format
for i in range(len(kdd99_dataframe)):
if kdd99_dataframe["label"][i] in ["back.","land.","neptune.","pod.","smurf.","teardrop."]:
kdd99_dataframe["label"][i] = "dos"
elif kdd99_dataframe["label"][i] in ["ipsweep.","nmap.","portsweep.","satan."]:
kdd99_dataframe["label"][i] = "scan"
elif kdd99_dataframe["label"][i] in ["ftp_write.","guess_passwd.","imap.","multihop.","phf.","spy.","warezclient.","warezmaster."]:
kdd99_dataframe["label"][i] = "r2l"
elif kdd99_dataframe["label"][i] in ["buffer_overflow.","loadmodule.","perl.","rootkit."]:
kdd99_dataframe["label"][i] = "u2r"
else:
kdd99_dataframe["label"][i] = "normal"
df = kdd99_dataframe.to_csv(self.path1.replace('.csv', '_kdd99_handled.csv') ,index = None)
return self.path1.replace('.csv', '_kdd99_handled.csv')
###### handling kdd99-extracted packet information
def handle_new(self):
new_dataframe = pd.read_csv(self.path2,sep=",")
# adding title
new_dataframe.columns = ["duration","protocol_type","service","flag","src_bytes","dst_bytes","land","wrong_fragment","urgent","count","srv_count","serror_rate","srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate","diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count","dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate","dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate","dst_host_rerror_rate","dst_host_srv_rerror_rate","lhost","lport","rhost","rport","time_stamp","label"]
# add useful protocol information: map destination ports 445 and 3389 to their protocol names (smb, rdp)
for i in range(len(new_dataframe)):
if new_dataframe["rport"][i] == 445:
new_dataframe["service"][i] = "smb"
elif new_dataframe["rport"][i] == 3389:
new_dataframe["service"][i] = "rdp"
# drop useless column
new_dataframe = new_dataframe.drop(["lhost","lport","rhost","rport","time_stamp"],axis=1)
df = new_dataframe.to_csv(self.path2.replace('.csv', '_new_handled.csv') ,index = None)
return self.path2.replace('.csv', '_new_handled.csv')
# SOME title-needed MERGING FUNCTIONS: the csv files passed in must keep their title (header) row
#####
## merge two csv files
def merge(self):
dataframe11 = pd.read_csv(self.path1,sep=",")
dataframe22 = pd.read_csv(self.path2,sep=",")
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
from Bio.Align import AlignInfo
from Bio import pairwise2
from Bio import AlignIO
import pandas as pd
import numpy as np
import pprint
import operator
import collections
import warnings
class C14Subunits(object):
def __init__(self, msa_fname, dyad_df, msa_format="stockholm", consensus_threshold = 0.3):
"""Get alignment and dataframe correspondind dyads and analyse for p10 and p20 subunit as well a linker"""
self.msa = msa = AlignIO.read(msa_fname, msa_format)
dyad_df.index = dyad_df.Seq_ID
align_info = AlignInfo.SummaryInfo(self.msa)
self.align_consensus = align_info.dumb_consensus(threshold = consensus_threshold)
score_tr = {'.': -1,'*':10 }
self.column_pp = { i: int(score_tr.get(score,score)) for i,score in enumerate(self.msa.column_annotations['posterior_probability'],1) }
self.msa_len = self.msa.get_alignment_length()
self.p10 = None
self.p20 = None
self.cys_indexes = []
self.his_indexes = []
for seq in self.msa:
if seq.id in dyad_df.index:
cys_site, his_site = dyad_df.loc[seq.id,["Caspase_CYS", "CASPASE_HIS"]]
if pd.notna(cys_site):
cys_align = pairwise2.align.localms(seq.seq, str(cys_site), 5, -4, -2, -1, one_alignment_only = True)
cys_seq, c_stop = cys_align[0][0:5:4]
c_index = self.get_position(cys_seq, cys_site, c_stop, "C")
self.cys_indexes.append(c_index)
if | pd.notna(his_site) | pandas.notna |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.size']=6
# plt.rcParams['lines.markersize']=7
plt.rcParams['lines.linewidth'] = 0.8
from sklearn import decomposition
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
import sys
sys.path.append(root_path)
def cumul_var_ratio(var_ratio):
sum=0.0
cumul_var_ratio=[]
for i in range(len(var_ratio)):
sum=sum+var_ratio[i]
cumul_var_ratio.append(sum)
return cumul_var_ratio
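# Typical use (sketch): feed in the explained-variance ratios of a fitted PCA, e.g.
# pca = decomposition.PCA().fit(samples)
# cvr = cumul_var_ratio(pca.explained_variance_ratio_)  # e.g. [0.55, 0.78, 0.91, ...]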
samples_setss=[
[
pd.read_csv(root_path+'/Huaxian_dwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Huaxian_eemd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Huaxian_modwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Huaxian_ssa/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Huaxian_vmd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
],
[
pd.read_csv(root_path+'/Xianyang_dwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Xianyang_eemd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Xianyang_modwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Xianyang_ssa/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Xianyang_vmd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
],
[
pd.read_csv(root_path+'/Zhangjiashan_dwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Zhangjiashan_eemd/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Zhangjiashan_modwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv'),
pd.read_csv(root_path+'/Zhangjiashan_ssa/data/one_step_1_ahead_forecast_pacf/train_samples.csv'),
| pd.read_csv(root_path+'/Zhangjiashan_vmd/data/one_step_1_ahead_forecast_pacf/train_samples.csv') | pandas.read_csv |
"""Request and response schemas for the FastAPI app.
"""
import pandas as pd
from pydantic import BaseModel, conlist
from typing import Optional
# Request
class Product(BaseModel):
title: Optional[str] = ''
concatenated_tags: Optional[str] = ''
class ListOfProducts(BaseModel):
products: conlist(Product, min_items=1)
def to_dataframe(self) -> pd.DataFrame:
products = self.dict()["products"]
dataframe = | pd.DataFrame(products) | pandas.DataFrame |
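# Usage sketch (assumed payload; assumes to_dataframe returns the DataFrame built above):
#   ListOfProducts(products=[{"title": "mug", "concatenated_tags": "kitchen cup"}]).to_dataframe()
# produces a single-row DataFrame with the columns "title" and "concatenated_tags".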
import os
import json
import random
import pandas as pd
from model_wrangler.dataset_managers import BaseDatasetManager, LOGGER
def get_category_mask(df_in, family_col):
in_cat = random.choice(df_in[family_col].unique())
bool_mask = df_in[family_col] == in_cat
return bool_mask
def sample_within_family(df_in, batch_size=32, num_in_family=16, family_col='family'):
cat_mask = get_category_mask(df_in, family_col)
num_in_family = min([num_in_family, cat_mask.sum()])
within_rows = df_in.loc[cat_mask, :].sample(num_in_family)
remaining = min([batch_size - within_rows.shape[0], (~cat_mask).sum()])
other_rows = df_in.loc[~cat_mask, :].sample(remaining)
df_batch = | pd.concat([within_rows, other_rows]) | pandas.concat |
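# Usage sketch (illustrative; assumes the function returns the concatenated batch
# and that `df` has a 'family' column):
#   batch = sample_within_family(df, batch_size=32, num_in_family=16, family_col='family')
# up to 16 rows share one randomly chosen family; the rest come from other families.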
import os
import yaml
import xml.etree.ElementTree as ET
import pandas as pd
import json
import fnmatch
import glob
import argparse
import sox
ROOT_DIR = os.getcwd() + '/'
DATA_RAW = 'data/raw/IDMT-SMT-GUITAR_V2/dataset1'
DATA_DIR = os.path.join(ROOT_DIR, DATA_RAW) + '/'
ANNOTATION_PATH = '*/annotation/*.xml'
AUDIO_PATH = '*/audio/*.wav'
def write_annotation():
"""
Writing meta annotations
https://www.idmt.fraunhofer.de/en/business_units/m2d/smt/guitar.html
"""
# Operates at a subdirectory level
import re
subdirectories = glob.glob(DATA_DIR + ANNOTATION_PATH)
filt = lambda fn: re.search(r'Major|Minor', fn) is None
files = list(filter(filt, subdirectories))
all_meta = []
for filepath in files:
guitar_model = re.sub(DATA_DIR, '', filepath).split('/')[0]
tree = ET.parse(filepath)
root = tree.getroot()
record = {}
record['guitarModel'] = guitar_model
record['filepath'] = filepath
# COMMON FLOW FOR ITERATING OVER XML
for meta_attribute in root:
# two fields
if meta_attribute.tag == 'globalParameter':
for field in meta_attribute:
record[field.tag] = field.text
else:
# FILE DATA
for field in meta_attribute.find('event'):
record[field.tag] = field.text
track_path = filepath.replace('annotation', 'audio')
track_path = '/'.join(track_path.split('/')[:-1]) + '/'
record['audioFileName'] = track_path + record['audioFileName']
all_meta.append(record)
to_df = | pd.DataFrame(all_meta) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import *
import seaborn as sns
# Classification Project: Sonar rocks or mines
# Load libraries
from matplotlib import pyplot
from pandas import read_csv
from pandas import set_option
from pandas.plotting import scatter_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
#read data into data frame
DATA_DIR = os.path.join('data', 'processed')
processed_data_paths = { 'individual' : { 'A': {'train': os.path.join(DATA_DIR, 'A', 'individual/A_indiv_train.csv'),
'test': os.path.join(DATA_DIR, 'A', 'individual/A_indiv_test.csv')},
'B': {'train': os.path.join(DATA_DIR, 'B', 'individual/B_indiv_train.csv'),
'test': os.path.join(DATA_DIR, 'B', 'individual/B_indiv_test.csv')},
'C': {'train': os.path.join(DATA_DIR, 'C', 'individual/C_indiv_train.csv'),
'test': os.path.join(DATA_DIR, 'C', 'individual/C_indiv_test.csv')}},
'household' : { 'A': {'train': os.path.join(DATA_DIR, 'A', 'household/A_hhold_train.csv'),
'test': os.path.join(DATA_DIR, 'A', 'household/A_hhold_test.csv')},
'B': {'train': os.path.join(DATA_DIR, 'B', 'household/B_hhold_train.csv'),
'test': os.path.join(DATA_DIR, 'B', 'household/B_hhold_test.csv')},
'C': {'train': os.path.join(DATA_DIR, 'C', 'household/C_hhold_train.csv'),
'test': os.path.join(DATA_DIR, 'C', 'household/C_hhold_test.csv')}},
'aggregated_indiv' : { 'A': {'train': os.path.join(DATA_DIR, 'A', 'individual/A_indiv_aggre_train.csv'),
'test': os.path.join(DATA_DIR, 'A', 'individual/A_indiv_aggre_test.csv')},
'B': {'train': os.path.join(DATA_DIR, 'B', 'individual/B_indiv_aggre_train.csv'),
'test': os.path.join(DATA_DIR, 'B', 'individual/B_indiv_aggre_test.csv')},
'C': {'train': os.path.join(DATA_DIR, 'C', 'individual/C_indiv_aggre_train.csv'),
'test': os.path.join(DATA_DIR, 'C', 'individual/C_indiv_aggre_test.csv')}},
'aggregated_hhold' : { 'A': {'train': os.path.join(DATA_DIR, 'aggregated', 'A_train.csv'),
'test': os.path.join(DATA_DIR, 'aggregated', 'A_test.csv')},
'B': {'train': os.path.join(DATA_DIR, 'aggregated', 'B_train.csv'),
'test': os.path.join(DATA_DIR, 'aggregated', 'B_test.csv')},
'C': {'train': os.path.join(DATA_DIR, 'aggregated', 'C_train.csv'),
'test': os.path.join(DATA_DIR, 'aggregated', 'C_test.csv')}},
'master' : { 'train' : os.path.join(DATA_DIR, 'aggregated', 'master_train.csv'),
'test' : os.path.join(DATA_DIR, 'aggregated', 'master_test.csv')}
}
'''
Processing the training data
'''
# load training data
b_household_train = pd.read_csv(processed_data_paths['aggregated_hhold']['B']['train'], index_col='id')
b_household_test = pd.read_csv(processed_data_paths['aggregated_hhold']['B']['test'], index_col='id')
a_household_train = | pd.read_csv(processed_data_paths['aggregated_hhold']['A']['train'], index_col='id') | pandas.read_csv |
from sales_analysis.data_pipeline import BASEPATH
from sales_analysis.data_pipeline._pipeline import SalesPipeline
import pytest
import os
import pandas as pd
# --------------------------------------------------------------------------
# Fixtures
@pytest.fixture
def pipeline():
FILEPATH = os.path.join(BASEPATH, "data")
DATA_FILES = [f for f in os.listdir(FILEPATH) if f.endswith('.csv')]
DATA = {f : pd.read_csv(os.path.join(FILEPATH, f)) for f in DATA_FILES}
return SalesPipeline(**DATA)
# --------------------------------------------------------------------------
# Data
data = {'customers': {pd.Timestamp('2019-08-01 00:00:00'): 9,
pd.Timestamp('2019-08-02 00:00:00'): 10,
pd.Timestamp('2019-08-03 00:00:00'): 10,
pd.Timestamp('2019-08-04 00:00:00'): 10,
pd.Timestamp('2019-08-05 00:00:00'): 9,
pd.Timestamp('2019-08-06 00:00:00'): 9,
pd.Timestamp('2019-08-07 00:00:00'): 10,
pd.Timestamp('2019-08-08 00:00:00'): 8,
pd.Timestamp('2019-08-09 00:00:00'): 5,
pd.Timestamp('2019-08-10 00:00:00'): 5,
pd.Timestamp('2019-08-11 00:00:00'): 10,
pd.Timestamp('2019-08-12 00:00:00'): 10,
pd.Timestamp('2019-08-13 00:00:00'): 6,
pd.Timestamp('2019-08-14 00:00:00'): 7,
pd.Timestamp('2019-08-15 00:00:00'): 10,
pd.Timestamp('2019-08-16 00:00:00'): 8,
pd.Timestamp('2019-08-17 00:00:00'): 7,
pd.Timestamp('2019-08-18 00:00:00'): 9,
pd.Timestamp('2019-08-19 00:00:00'): 5,
pd.Timestamp('2019-08-20 00:00:00'): 5},
'total_discount_amount': {pd.Timestamp('2019-08-01 00:00:00'): 15152814.736907512,
pd.Timestamp('2019-08-02 00:00:00'): 20061245.64408109,
pd.Timestamp('2019-08-03 00:00:00'): 26441693.751396574,
pd.Timestamp('2019-08-04 00:00:00'): 25783015.567048658,
pd.Timestamp('2019-08-05 00:00:00'): 16649773.993076814,
pd.Timestamp('2019-08-06 00:00:00'): 24744027.428384878,
pd.Timestamp('2019-08-07 00:00:00'): 21641181.771564845,
pd.Timestamp('2019-08-08 00:00:00'): 27012160.85245146,
pd.Timestamp('2019-08-09 00:00:00'): 13806814.237002019,
pd.Timestamp('2019-08-10 00:00:00'): 9722459.599448118,
pd.Timestamp('2019-08-11 00:00:00'): 20450260.26194652,
pd.Timestamp('2019-08-12 00:00:00'): 22125711.151501,
pd.Timestamp('2019-08-13 00:00:00'): 11444206.200090334,
pd.Timestamp('2019-08-14 00:00:00'): 17677326.65707852,
pd.Timestamp('2019-08-15 00:00:00'): 26968819.12338184,
pd.Timestamp('2019-08-16 00:00:00'): 22592246.991756547,
pd.Timestamp('2019-08-17 00:00:00'): 15997597.519811645,
pd.Timestamp('2019-08-18 00:00:00'): 17731498.506244037,
pd.Timestamp('2019-08-19 00:00:00'): 22127822.876592986,
pd.Timestamp('2019-08-20 00:00:00'): 5550506.789972418},
'items': {pd.Timestamp('2019-08-01 00:00:00'): 2895,
pd.Timestamp('2019-08-02 00:00:00'): 3082,
pd.Timestamp('2019-08-03 00:00:00'): 3559,
pd.Timestamp('2019-08-04 00:00:00'): 3582,
pd.Timestamp('2019-08-05 00:00:00'): 2768,
pd.Timestamp('2019-08-06 00:00:00'): 3431,
pd.Timestamp('2019-08-07 00:00:00'): 2767,
pd.Timestamp('2019-08-08 00:00:00'): 2643,
pd.Timestamp('2019-08-09 00:00:00'): 1506,
pd.Timestamp('2019-08-10 00:00:00'): 1443,
pd.Timestamp('2019-08-11 00:00:00'): 2466,
pd.Timestamp('2019-08-12 00:00:00'): 3482,
pd.Timestamp('2019-08-13 00:00:00'): 1940,
pd.Timestamp('2019-08-14 00:00:00'): 1921,
pd.Timestamp('2019-08-15 00:00:00'): 3479,
pd.Timestamp('2019-08-16 00:00:00'): 3053,
pd.Timestamp('2019-08-17 00:00:00'): 2519,
pd.Timestamp('2019-08-18 00:00:00'): 2865,
pd.Timestamp('2019-08-19 00:00:00'): 1735,
pd.Timestamp('2019-08-20 00:00:00'): 1250},
'order_total_avg': {pd.Timestamp('2019-08-01 00:00:00'): 1182286.0960463749,
pd.Timestamp('2019-08-02 00:00:00'): 1341449.559055637,
pd.Timestamp('2019-08-03 00:00:00'): 1270616.0372525519,
pd.Timestamp('2019-08-04 00:00:00'): 1069011.1516039693,
pd.Timestamp('2019-08-05 00:00:00'): 1355304.7342628485,
pd.Timestamp('2019-08-06 00:00:00'): 1283968.435650978,
pd.Timestamp('2019-08-07 00:00:00'): 1319110.4787216866,
pd.Timestamp('2019-08-08 00:00:00'): 1027231.5196824896,
pd.Timestamp('2019-08-09 00:00:00'): 1201471.0717715647,
pd.Timestamp('2019-08-10 00:00:00'): 1314611.2300065856,
pd.Timestamp('2019-08-11 00:00:00'): 1186152.4565363638,
pd.Timestamp('2019-08-12 00:00:00'): 1155226.4552911327,
pd.Timestamp('2019-08-13 00:00:00'): 1346981.8930212667,
pd.Timestamp('2019-08-14 00:00:00'): 1019646.0386455443,
pd.Timestamp('2019-08-15 00:00:00'): 1286793.278547962,
pd.Timestamp('2019-08-16 00:00:00'): 1254721.8660029566,
pd.Timestamp('2019-08-17 00:00:00'): 1419237.673786449,
pd.Timestamp('2019-08-18 00:00:00'): 1173087.9508403398,
pd.Timestamp('2019-08-19 00:00:00'): 1162434.8033358732,
pd.Timestamp('2019-08-20 00:00:00'): 1046669.750923031},
'discount_rate_avg': {pd.Timestamp('2019-08-01 00:00:00'): 0.1252497888814673,
pd.Timestamp('2019-08-02 00:00:00'): 0.12950211356271726,
pd.Timestamp('2019-08-03 00:00:00'): 0.1490744307031331,
pd.Timestamp('2019-08-04 00:00:00'): 0.15162918618667656,
pd.Timestamp('2019-08-05 00:00:00'): 0.13130630218741238,
pd.Timestamp('2019-08-06 00:00:00'): 0.13373546744128126,
pd.Timestamp('2019-08-07 00:00:00'): 0.15567735848995318,
pd.Timestamp('2019-08-08 00:00:00'): 0.20265603603112725,
| pd.Timestamp('2019-08-09 00:00:00') | pandas.Timestamp |
#!/usr/bin/env python
# coding: utf-8
"""
make_herbarium_2022_catalog_df.py
"""
#
# Description:
#
# Created On: Sunday Feb 27th, 2022
# Created By: <NAME>
# ### Key constants
# DATASETS_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images"
# EXTANT_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Extant_Leaves/original/full/jpg"
# GENERAL_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Fossil/General_Fossil/original/full/jpg"
# FLORISSANT_ROOT = "/media/data_cifs/projects/prj_fossils/data/processed_data/leavesdb-v1_1/images/Fossil/Florissant_Fossil/original/full/jpg"
# with open(os.path.join(HERBARIUM_ROOT, "train_metadata.json")) as fp:
# train_data = json.load(fp)
# with open(os.path.join(HERBARIUM_ROOT, "test_metadata.json")) as fp:
# test_data = json.load(fp)
# for k,v in train_data.items():
# print(k, f"| Total:{len(v)}")
# print("First:", v[0])
# print("Last:", v[-1])
# print("="*15+"\n")
# assert len(train_data["annotations"]) == len(train_data["images"])
import argparse
import os
import sys
from typing import *
import json
import pandas as pd
from pathlib import Path
import seaborn as sns
import matplotlib.pyplot as plt
from rich import print as pp
# HERBARIUM_ROOT_DEFAULT = "/media/data_cifs/projects/prj_fossils/data/raw_data/herbarium-2022-fgvc9_resize"
# from dotenv import load_dotenv
# load_dotenv()
import imutils
from imutils.big.split_catalog_utils import TRAIN_KEY, VAL_KEY, TEST_KEY
HERBARIUM_ROOT_DEFAULT = os.environ["HERBARIUM_ROOT_DEFAULT"]
CATALOG_DIR = os.environ["CATALOG_DIR"]
SPLITS_DIR = os.environ["SPLITS_DIR"]
def optimize_dtypes_train(df: pd.DataFrame) -> pd.DataFrame:
"""
Convert column dtypes to optimal type for herbarium train metadata df.
"""
# Reduce total df size by optimizing dtypes per column
cat_cols = ['genus_id', 'institution_id', 'category_id',
'scientificName', 'family', 'genus', 'species','Species',
'collectionCode', 'license', 'authors']
if "y" in df.columns:
cat_cols.append("y")
str_cols = ['image_id', 'file_name', 'path']
col_dtypes = {c:"category" for c in cat_cols if c in df.columns}
col_dtypes.update({c:"string" for c in str_cols})
# df = df.convert_dtypes()
df = df.astype(col_dtypes)
return df
def optimize_dtypes_test(df: pd.DataFrame) -> pd.DataFrame:
"""
Convert column dtypes to optimal type for herbarium test metadata df.
"""
dtypes_test = {'image_id':"string",
'file_name':"string",
'license':"category",
'path':"string"}
dtypes_test= {col:dtype for col, dtype in dtypes_test.items() if col in df.columns}
# Reduce total df size by optimizing dtypes per column
df = df.astype(dtypes_test)
return df
def read_train_df_from_csv(train_path,
nrows: Optional[int]=None,
index_col: int=0
) -> pd.DataFrame:
df = pd.read_csv(train_path, index_col=index_col, nrows=nrows)
df = optimize_dtypes_train(df)
return df
def read_test_df_from_csv(test_path,
nrows: Optional[int]=None,
index_col: int=0
) -> pd.DataFrame:
df = pd.read_csv(test_path, index_col=index_col, nrows=nrows)
df = optimize_dtypes_test(df)
return df
def read_all_from_csv(root_dir: str=None,
source_csv_paths: Optional[List[str]]=None,
subset_read_funcs: Union[Callable, Dict[str, Callable]]={
TRAIN_KEY: read_train_df_from_csv,
TEST_KEY: read_test_df_from_csv
},
return_dict: bool=False,
**kwargs) -> Tuple[pd.DataFrame]:
"""
Read the train_metadata.csv and test_metadata.csv files from `root_dir`
Note: This is prior to any train-val splits.
"""
if source_csv_paths is not None:
train_path, test_path = sorted(source_csv_paths)[::-1]
else:
train_path = Path(root_dir, "train_metadata.csv")
test_path = Path(root_dir, "test_metadata.csv")
if isinstance(subset_read_funcs, Callable):
train_df = subset_read_funcs(train_path)
test_df = subset_read_funcs(test_path)
else:
train_df = subset_read_funcs[TRAIN_KEY](train_path)
test_df = subset_read_funcs[TEST_KEY](test_path)
# train_df = read_train_df_from_csv(train_path)
# test_df = read_test_df_from_csv(test_path)
if return_dict:
return {
TRAIN_KEY: train_df,
TEST_KEY: test_df
}
return train_df, test_df
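# Usage sketch (illustrative; paths come from the HERBARIUM_ROOT_DEFAULT env variable):
#   train_df, test_df = read_all_from_csv(root_dir=HERBARIUM_ROOT_DEFAULT)
#   subsets = read_all_from_csv(root_dir=HERBARIUM_ROOT_DEFAULT, return_dict=True)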
# read_train_df_from_csv,
# read_test_df_from_csv
###################################
###################################
class HerbariumMetadata:
TRAIN_KEYS = ['annotations', 'images', 'categories', 'genera', 'institutions', 'distances', 'license']
TEST_KEYS = ['image_id', 'file_name', 'license']
def __init__(self,
herbarium_root: str=HERBARIUM_ROOT_DEFAULT):
self.herbarium_root = herbarium_root
def get_train_df(self) -> pd.DataFrame:
metadata_path = Path(self.herbarium_root, "train_metadata.json")
with open(os.path.join(metadata_path)) as fp:
train_data = json.load(fp)
assert all([k in train_data.keys() for k in self.TRAIN_KEYS])
train_annotations = pd.DataFrame(train_data['annotations'])
train_categories = | pd.DataFrame(train_data['categories']) | pandas.DataFrame |
import unittest
import copy
import numpy as np
import numpy.testing as np_test
import pandas as pd
import pandas.testing as pd_test
import warnings
from pyblackscholesanalytics.market.market import MarketEnvironment
from pyblackscholesanalytics.options.options import PlainVanillaOption, DigitalOption
from pyblackscholesanalytics.utils.utils import scalarize
class TestPlainVanillaOption(unittest.TestCase):
"""Class to test public methods of PlainVanillaOption class"""
def setUp(self) -> None:
warnings.filterwarnings("ignore")
# common market environment
mkt_env = MarketEnvironment()
# option objects
self.call_opt = PlainVanillaOption(mkt_env)
self.put_opt = PlainVanillaOption(mkt_env, option_type="put")
# pricing parameters
S_scalar = 100
S_vector = [90, 100, 110]
t_scalar_string = "01-06-2020"
t_date_range = pd.date_range(start="2020-04-19", end="2020-12-21", periods=5)
# common pricing parameter setup
common_params = {"np_output": True, "minimization_method": "Least-Squares"}
# scalar parameters setup
self.scalar_params = copy.deepcopy(common_params)
self.scalar_params["S"] = S_scalar
self.scalar_params["t"] = t_scalar_string
# vector parameters setup
self.vector_params = copy.deepcopy(common_params)
self.vector_params["S"] = S_vector
self.vector_params["t"] = t_date_range
# complex pricing parameter setup
# (S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
K_vector = [75, 85, 90, 95]
mK = len(K_vector)
n = 3
sigma_grid_K = np.array([0.1 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
r_grid_K = np.array([0.01 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
self.complex_params = {"S": S_vector[0],
"K": K_vector,
"t": pd.date_range(start="2020-04-19", end="2020-12-21", periods=n),
"sigma": sigma_grid_K,
"r": r_grid_K,
"np_output": False,
"minimization_method": "Least-Squares"}
def test_price_scalar(self):
"""Test price - scalar case"""
# call
test_call = scalarize(self.call_opt.price(**self.scalar_params))
expected_call = 7.548381716811839
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.price(**self.scalar_params))
expected_put = 4.672730506407959
self.assertEqual(test_put, expected_put)
def test_price_vector_np(self):
"""Test price - np.ndarray output case"""
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = np.array([[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = np.array([[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]])
np_test.assert_allclose(test_put, expected_put)
def test_price_vector_df(self):
"""Test price - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = pd.DataFrame(data=[[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_PnL_scalar(self):
"""Test P&L - scalar case"""
# call
test_call = scalarize(self.call_opt.PnL(**self.scalar_params))
expected_call = 4.060979245868182
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.PnL(**self.scalar_params))
expected_put = -5.368600081057167
self.assertEqual(test_put, expected_put)
def test_PnL_vector_np(self):
"""Test P&L - np.ndarray output case"""
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = np.array([[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = np.array([[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]])
np_test.assert_allclose(test_put, expected_put)
def test_PnL_vector_df(self):
"""Test P&L - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = pd.DataFrame(data=[[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = pd.DataFrame(data=[[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_delta_scalar(self):
"""Test Delta - scalar case"""
# call
test_call = scalarize(self.call_opt.delta(**self.scalar_params))
expected_call = 0.6054075531684143
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.delta(**self.scalar_params))
expected_put = -0.3945924468315857
self.assertEqual(test_put, expected_put)
def test_delta_vector_np(self):
"""Test Delta - np.ndarray output case"""
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = np.array([[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = np.array([[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]])
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
def test_delta_vector_df(self):
"""Test Delta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = pd.DataFrame(data=[[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_gamma_scalar(self):
"""Test Gamma - scalar case"""
# call
test_call = scalarize(self.call_opt.gamma(**self.scalar_params))
expected_call = 0.025194958512498786
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.gamma(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put gamma coincide
self.assertEqual(test_call, test_put)
def test_gamma_vector_np(self):
"""Test Gamma - np.ndarray output case"""
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = np.array([[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-6)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
# assert call and put gamma coincide
np_test.assert_allclose(test_call, test_put)
def test_gamma_vector_df(self):
"""Test Gamma - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
pd_test.assert_frame_equal(test_put, expected_put)
# assert call and put gamma coincide
pd_test.assert_frame_equal(test_call, test_put)
def test_vega_scalar(self):
"""Test Vega - scalar case"""
# call
test_call = scalarize(self.call_opt.vega(**self.scalar_params))
expected_call = 0.29405622811847903
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.vega(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put vega coincide
self.assertEqual(test_call, test_put)
def test_vega_vector_np(self):
"""Test Vega - np.ndarray output case"""
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = np.array([[0.28419942, 0.32005661, 0.2534375],
[0.23467293, 0.28153094, 0.21168961],
[0.17415326, 0.23550311, 0.16055207],
[0.09220072, 0.17386752, 0.09029355],
[0.00045056, 0.06592268, 0.00097279]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
# assert call and put vega coincide
np_test.assert_allclose(test_call, test_put)
def test_vega_vector_df(self):
"""Test Vega - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.28419942, 0.32005661, 0.2534375],
[0.23467293, 0.28153094, 0.21168961],
[0.17415326, 0.23550311, 0.16055207],
[0.09220072, 0.17386752, 0.09029355],
[0.00045056, 0.06592268, 0.00097279]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
# assert call and put vega coincide
pd_test.assert_frame_equal(test_call, test_put)
def test_theta_scalar(self):
"""Test Theta - scalar case"""
# call
test_call = scalarize(self.call_opt.theta(**self.scalar_params))
expected_call = -0.021064685979455443
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.theta(**self.scalar_params))
expected_put = -0.007759980665812141
self.assertEqual(test_put, expected_put)
def test_theta_vector_np(self):
"""Test Theta - np.ndarray output case"""
# call
test_call = self.call_opt.theta(**self.vector_params)
expected_call = np.array([[-0.01516655, -0.01977662, -0.01990399],
[-0.01569631, -0.02176239, -0.0212802],
[-0.01601397, -0.02491789, -0.02297484],
[-0.01474417, -0.03162919, -0.02457737],
[-0.00046144, -0.0728981, -0.01462746]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-4)
# put
test_put = self.put_opt.theta(**self.vector_params)
expected_put = np.array([[-0.00193999, -0.00655005, -0.00667743],
[-0.00235693, -0.00842301, -0.00794082],
[-0.00256266, -0.01146658, -0.00952353],
[-0.00117813, -0.01806315, -0.01101133],
[0.01321844, -0.05921823, -0.00094758]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_theta_vector_df(self):
"""Test Theta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.theta(**self.vector_params)
expected_call = pd.DataFrame(data=[[-0.01516655, -0.01977662, -0.01990399],
[-0.01569631, -0.02176239, -0.0212802],
[-0.01601397, -0.02491789, -0.02297484],
[-0.01474417, -0.03162919, -0.02457737],
[-0.00046144, -0.0728981, -0.01462746]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.theta(**self.vector_params)
expected_put = pd.DataFrame(data=[[-0.00193999, -0.00655005, -0.00667743],
[-0.00235693, -0.00842301, -0.00794082],
[-0.00256266, -0.01146658, -0.00952353],
[-0.00117813, -0.01806315, -0.01101133],
[0.01321844, -0.05921823, -0.00094758]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_rho_scalar(self):
"""Test Rho - scalar case"""
# call
test_call = scalarize(self.call_opt.rho(**self.scalar_params))
expected_call = 0.309243166487844
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.rho(**self.scalar_params))
expected_put = -0.2575372798733608
self.assertEqual(test_put, expected_put)
def test_rho_vector_np(self):
"""Test Rho - np.ndarray output case"""
# call
test_call = self.call_opt.rho(**self.vector_params)
expected_call = np.array([[2.08128741e-01, 3.72449469e-01, 5.12209444e-01],
[1.39670999e-01, 2.81318986e-01, 4.02292404e-01],
[7.76651463e-02, 1.91809707e-01, 2.90026614e-01],
[2.49657984e-02, 1.01399432e-01, 1.68411513e-01],
[2.17415573e-05, 1.39508485e-02, 2.73093423e-02]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.rho(**self.vector_params)
expected_put = np.array([[-4.69071412e-01, -3.04750685e-01, -1.64990710e-01],
[-3.77896910e-01, -2.36248923e-01, -1.15275505e-01],
[-2.80139757e-01, -1.65995197e-01, -6.77782897e-02],
[-1.67672008e-01, -9.12383748e-02, -2.42262934e-02],
[-2.73380139e-02, -1.34089069e-02, -5.04131783e-05]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_rho_vector_df(self):
"""Test Theta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.rho(**self.vector_params)
expected_call = pd.DataFrame(data=[[2.08128741e-01, 3.72449469e-01, 5.12209444e-01],
[1.39670999e-01, 2.81318986e-01, 4.02292404e-01],
[7.76651463e-02, 1.91809707e-01, 2.90026614e-01],
[2.49657984e-02, 1.01399432e-01, 1.68411513e-01],
[2.17415573e-05, 1.39508485e-02, 2.73093423e-02]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.rho(**self.vector_params)
expected_put = pd.DataFrame(data=[[-4.69071412e-01, -3.04750685e-01, -1.64990710e-01],
[-3.77896910e-01, -2.36248923e-01, -1.15275505e-01],
[-2.80139757e-01, -1.65995197e-01, -6.77782897e-02],
[-1.67672008e-01, -9.12383748e-02, -2.42262934e-02],
[-2.73380139e-02, -1.34089069e-02, -5.04131783e-05]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_Implied_Vol_scalar(self):
"""Test Implied Volatility - scalar case"""
# call
test_call = scalarize(self.call_opt.implied_volatility(**self.scalar_params))
expected_call = 0.2
self.assertAlmostEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.implied_volatility(**self.scalar_params))
expected_put = 0.2
self.assertAlmostEqual(test_put, expected_put)
def test_Implied_Vol_vector_np(self):
"""Test Implied Volatility - np.ndarray output case"""
# call
test_call = self.call_opt.implied_volatility(**self.vector_params)
expected_call = 0.2 + np.zeros_like(test_call)
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.implied_volatility(**self.vector_params)
expected_put = 0.2 + np.zeros_like(test_put)
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_Implied_Vol_vector_df(self):
"""Test Implied Volatility - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.implied_volatility(**self.vector_params)
expected_call = pd.DataFrame(data=0.2 + np.zeros_like(test_call),
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.implied_volatility(**self.vector_params)
expected_put = pd.DataFrame(data=0.2 + np.zeros_like(test_put),
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
def test_complex_parameters_setup(self):
"""
Test complex parameter setup:
(S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
"""
# call
test_call_price = self.call_opt.price(**self.complex_params)
test_call_PnL = self.call_opt.PnL(**self.complex_params)
test_call_delta = self.call_opt.delta(**self.complex_params)
test_call_gamma = self.call_opt.gamma(**self.complex_params)
test_call_vega = self.call_opt.vega(**self.complex_params)
test_call_theta = self.call_opt.theta(**self.complex_params)
test_call_rho = self.call_opt.rho(**self.complex_params)
test_call_iv = self.call_opt.implied_volatility(**self.complex_params)
expected_call_price = pd.DataFrame(data=[[15.55231058, 9.40714796, 9.87150919, 10.97983523],
[20.05777231, 16.15277891, 16.02977848, 16.27588191],
[15.81433361, 8.75227505, 6.65476799, 5.19785143]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_price.rename_axis("K", axis='columns', inplace=True)
expected_call_price.rename_axis("t", axis='rows', inplace=True)
expected_call_PnL = pd.DataFrame(data=[[12.06490811, 5.91974549, 6.38410672, 7.49243276],
[16.57036984, 12.66537644, 12.54237601, 12.78847944],
[12.32693114, 5.26487258, 3.16736552, 1.71044896]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_PnL.rename_axis("K", axis='columns', inplace=True)
expected_call_PnL.rename_axis("t", axis='rows', inplace=True)
expected_call_delta = pd.DataFrame(data=[[0.98935079, 0.69453583, 0.58292013, 0.53579465],
[0.79256302, 0.65515368, 0.60705014, 0.57529078],
[0.90573251, 0.6717088, 0.54283905, 0.43788167]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_delta.rename_axis("K", axis='columns', inplace=True)
expected_call_delta.rename_axis("t", axis='rows', inplace=True)
expected_call_gamma = pd.DataFrame(data=[[0.00373538, 0.02325203, 0.01726052, 0.01317896],
[0.01053321, 0.01130107, 0.01011038, 0.0090151],
[0.01253481, 0.0242596, 0.02420515, 0.02204576]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_gamma.rename_axis("K", axis='columns', inplace=True)
expected_call_gamma.rename_axis("t", axis='rows', inplace=True)
expected_call_vega = pd.DataFrame(data=[[0.02122104, 0.26419398, 0.29417607, 0.29948378],
[0.15544424, 0.20013116, 0.20888592, 0.2128651],
[0.02503527, 0.05383637, 0.05908709, 0.05870816]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_vega.rename_axis("K", axis='columns', inplace=True)
expected_call_vega.rename_axis("t", axis='rows', inplace=True)
expected_call_theta = pd.DataFrame(data=[[-0.00242788, -0.01322973, -0.02073753, -0.02747845],
[-0.03624253, -0.0521798, -0.06237363, -0.07180046],
[-0.12885912, -0.28334665, -0.33769702, -0.36349655]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_theta.rename_axis("K", axis='columns', inplace=True)
expected_call_theta.rename_axis("t", axis='rows', inplace=True)
expected_call_rho = pd.DataFrame(data=[[0.51543152, 0.37243495, 0.29872256, 0.26120194],
[0.18683002, 0.15599644, 0.14066931, 0.12935721],
[0.01800044, 0.0141648, 0.01156185, 0.00937301]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_rho.rename_axis("K", axis='columns', inplace=True)
expected_call_rho.rename_axis("t", axis='rows', inplace=True)
expected_call_iv = pd.DataFrame(data=self.complex_params["sigma"],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_call_iv.rename_axis("K", axis='columns', inplace=True)
expected_call_iv.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call_price, expected_call_price)
pd_test.assert_frame_equal(test_call_PnL, expected_call_PnL)
pd_test.assert_frame_equal(test_call_delta, expected_call_delta)
pd_test.assert_frame_equal(test_call_gamma, expected_call_gamma)
pd_test.assert_frame_equal(test_call_vega, expected_call_vega)
pd_test.assert_frame_equal(test_call_theta, expected_call_theta)
pd_test.assert_frame_equal(test_call_rho, expected_call_rho)
pd_test.assert_frame_equal(test_call_iv, expected_call_iv)
# put
test_put_price = self.put_opt.price(**self.complex_params)
test_put_PnL = self.put_opt.PnL(**self.complex_params)
test_put_delta = self.put_opt.delta(**self.complex_params)
test_put_gamma = self.put_opt.gamma(**self.complex_params)
test_put_vega = self.put_opt.vega(**self.complex_params)
test_put_theta = self.put_opt.theta(**self.complex_params)
test_put_rho = self.put_opt.rho(**self.complex_params)
test_put_iv = self.put_opt.implied_volatility(**self.complex_params)
expected_put_price = pd.DataFrame(data=[[0.02812357, 3.22314287, 7.9975943, 13.35166847],
[3.70370639, 9.31459014, 13.76319167, 18.54654119],
[0.62962992, 3.51971706, 6.38394341, 9.88603552]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_price.rename_axis("K", axis='columns', inplace=True)
expected_put_price.rename_axis("t", axis='rows', inplace=True)
expected_put_PnL = pd.DataFrame(data=[[-10.01320701, -6.81818772, -2.04373628, 3.31033788],
[-6.3376242, -0.72674045, 3.72186108, 8.5052106],
[-9.41170067, -6.52161353, -3.65738717, -0.15529507]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_PnL.rename_axis("K", axis='columns', inplace=True)
expected_put_PnL.rename_axis("t", axis='rows', inplace=True)
expected_put_delta = pd.DataFrame(data=[[-0.01064921, -0.30546417, -0.41707987, -0.46420535],
[-0.20743698, -0.34484632, -0.39294986, -0.42470922],
[-0.09426749, -0.3282912, -0.45716095, -0.56211833]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_delta.rename_axis("K", axis='columns', inplace=True)
expected_put_delta.rename_axis("t", axis='rows', inplace=True)
expected_put_gamma = copy.deepcopy(expected_call_gamma)
expected_put_vega = copy.deepcopy(expected_call_vega)
expected_put_theta = pd.DataFrame(data=[[-0.00038744, -0.00863707, -0.01349429, -0.01735551],
[-0.02615404, -0.03850937, -0.04554804, -0.05157676],
[-0.11041151, -0.26012269, -0.31065535, -0.33236619]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_theta.rename_axis("K", axis='columns', inplace=True)
expected_put_theta.rename_axis("t", axis='rows', inplace=True)
expected_put_rho = pd.DataFrame(data=[[-0.00691938, -0.21542518, -0.31936724, -0.38666626],
[-0.08152366, -0.14703153, -0.17901683, -0.2068619],
[-0.00249691, -0.00905916, -0.01302149, -0.01656895]],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_rho.rename_axis("K", axis='columns', inplace=True)
expected_put_rho.rename_axis("t", axis='rows', inplace=True)
expected_put_iv = pd.DataFrame(data=self.complex_params["sigma"],
index=self.complex_params["t"],
columns=self.complex_params["K"])
expected_put_iv.rename_axis("K", axis='columns', inplace=True)
expected_put_iv.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put_price, expected_put_price)
pd_test.assert_frame_equal(test_put_PnL, expected_put_PnL)
| pd_test.assert_frame_equal(test_put_delta, expected_put_delta) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python
from copy import deepcopy
import itertools
import multiprocessing
from multiprocessing import Pool
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
def set_pandas_options(max_columns=None, max_rows=None):
| pd.set_option("display.max_columns", max_columns) | pandas.set_option |
import csv
import os
import pandas as pd
import math
import numpy as np
POIEdges = {'Sathorn_Thai_1': ['L197#1', 'L197#2'],
'Sathorn_Thai_2': ['L30', 'L58#1', 'L58#2'],
'Charoenkrung_1': ['L30032'],
'Charoenkrung_2': ['L60', 'L73', 'L10149#1', 'L10149#2'],
'Charoenkrung_3': ['L67'],
'Silom_1': ['L138'],
'Silom_2': ['L133.25'],
'Silom_3': ['L49'],
'Mehasak': ['L64'],
'Surasak': ['L10130', 'L10189'],
'Charoen_Rat': ['L40']
}
percentage = ['100%','1%','5%','10%','15%','20%','25%','30%','35%','40%','45%','50%']
resolution = ['1','5','10','15','20','25','30','35','40','45','50','55','60']
def createFileForMean(fileNo):
# to get the current directory
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
os.mkdir(dirpath + '/' + str(fileNo) + '/statistics')
for time_resolution in resolution:
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
#print(len(percentage))
heading = ["Road Name",*percentage]
writer1.writerow(heading)
myfile1.close()
def createFileForStd(fileNo):
# to get the current directory
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
for freq in resolution:
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/' + str(freq) + '_std.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
# print(len(percentage))
heading = ["Road Name", *percentage]
writer1.writerow(heading)
myfile1.close()
def parseFloat(str):
try:
return float(str)
except:
str = str.strip()
if str.endswith("%"):
return float(str.strip("%").strip()) / 100
raise Exception("Don't know how to parse %s" % str)
def statisticsForResolution_And_Percentage(fileNo):
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
###############################################
# https://www.geeksforgeeks.org/program-implement-standard-error-mean/
# Worked example:
#   arr = [78.53, 79.62, 80.25, 81.05, 83.21, 83.46]
#   mean = (78.53 + 79.62 + 80.25 + 81.05 + 83.21 + 83.46) / 6
#        = 486.12 / 6
#        = 81.02
#   sample standard deviation
#        = sqrt(((78.53 - 81.02)^2 + (79.62 - 81.02)^2 + ... + (83.46 - 81.02)^2) / (6 - 1))
#        = sqrt(19.5036 / 5)
#        = 1.97502
#   standard error of mean = 1.97502 / sqrt(6)
#                          = 0.8063
###############################################
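# A minimal NumPy sketch of the same quantities (illustrative only; `speeds` is a
# hypothetical array of per-interval mean speeds):
#   speeds = np.array([78.53, 79.62, 80.25, 81.05, 83.21, 83.46])
#   mean = speeds.mean()              # 81.02
#   std = speeds.std(ddof=1)          # ~1.975 (sample standard deviation)
#   sem = std / np.sqrt(len(speeds))  # ~0.806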
for time_resolution in resolution:
for edge, value in POIEdges.items():
meanSpeed = []
std = []
for pcent in percentage:
path = dirpath + '/'+str(fileNo)+'/' + edge + '_' + time_resolution + '_' + pcent + '.csv'
if (os.path.exists(path)):
link_df = pd.read_csv(path)
meanSpeed.append(link_df['Mean Speed (km/h)'].mean())
std.append(link_df['Mean Speed (km/h)'].std())
myfile = open(
dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv', 'a', newline='')
writer = csv.writer(myfile)
with myfile:
writer.writerow([edge, *meanSpeed])
myfile.close()
myfile = open(
dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv', 'a', newline='')
writer = csv.writer(myfile)
with myfile:
writer.writerow([edge, *std])
myfile.close()
def stasticsforLoop(fileNo):
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
for time_resolution in resolution:
###################for standard deviation###############################
temp_1 =[]
temp_1.append('Mean of standard deviation for all edges')
temp_2 = []
temp_2.append('Standard Error of Mean')
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv'
if (os.path.exists(path)):
road_df = pd.read_csv(path)
for column in (list(road_df)):
if column !='Road Name':
#print('Number of roads :', len(road_df))
temp_1.append(road_df[column].sum()/len(road_df)) # Mean of standard deviation for all edges
percent = parseFloat(column)
#print(time_resolution,column, road_df[column].sum())
#print(len(road_df))
temp_2.append((road_df[column].sum()/len(road_df))/math.sqrt(percent))# standard error of mean for all edges
road_df.loc[len(road_df)] = temp_1
road_df.loc[len(road_df)+1] = temp_2
road_df.to_csv(dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv',
index=False)
##################for mean ###############################
temp = []
temp.append('Mean Speed for all edges')
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv'
if (os.path.exists(path)):
road_df = pd.read_csv(path)
for column in (list(road_df)):
if column != 'Road Name':
temp.append(road_df[column].sum() / len(road_df))
road_df.loc[len(road_df)] = temp
road_df.to_csv(dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv',
index=False)
def readTotal(fileNo):
dirpath = 'c:/RetrieveOnly100%DATAFROMSUMO_RANDOMSEED(One time)-DATASET-WithoutReplicatedVID'
####################for mean ###########################################
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/All_mean.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
heading = ["Time Resolution",*percentage]
writer1.writerow(heading)
for time_resolution in resolution:
temp =[]
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_mean.csv'
if (os.path.exists(path)):
time_df = pd.read_csv(path)
temp = time_df.iloc[-1,:].values.tolist()
#print(temp)
temp.pop(0)
writer1.writerow([time_resolution,*temp])
myfile1.close()
##################for standard deviation #################################
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/All_std.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
heading = ["Time Resolution", *percentage]
writer1.writerow(heading)
for time_resolution in resolution:
temp = []
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv'
if (os.path.exists(path)):
time_df = pd.read_csv(path)
temp = time_df.iloc[-2,:].values.tolist()
temp.pop(0)
writer1.writerow([time_resolution,*temp])
myfile1.close()
###################for standard error of mean################################################
myfile1 = open(
dirpath + '/'+str(fileNo)+'/statistics/All_stdError.csv', 'w', newline='')
writer1 = csv.writer(myfile1)
heading = ["Time Resolution", *percentage]
writer1.writerow(heading)
for time_resolution in resolution:
temp = []
path = dirpath + '/'+str(fileNo)+'/statistics/' + str(time_resolution) + '_std.csv'
if (os.path.exists(path)):
time_df = | pd.read_csv(path) | pandas.read_csv |
import os
import random
import numpy as np
import pandas as pd
import copy
import torch
import wandb
import gc
from tqdm import tqdm
import pickle
from args import parse_args
from dkt.utils import setSeeds
from dkt.metric import get_metric
from dkt.dataloader import *
from dkt.optimizer import get_optimizer
from dkt.scheduler import get_scheduler
from dkt.criterion import get_criterion
from dkt.trainer import get_model, train, validate, process_batch, save_checkpoint
def seed_everything(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
class Trainer:
def __init__(self):
pass
def train(self, args, train_data, valid_data, model = None):
"""훈련을 마친 모델을 반환한다"""
# args update
self.args = args
# empty the CUDA cache and run the garbage collector
torch.cuda.empty_cache()
gc.collect()
# augmentation
augmented_train_data = slidding_window(train_data, args)
if len(augmented_train_data) != len(train_data):
print(f"Data Augmentation applied. Train data {len(train_data)} -> {len(augmented_train_data)}\n")
train_loader, valid_loader = get_loaders(args, augmented_train_data, valid_data)
# only when using warmup scheduler
args.total_steps = int(len(train_loader.dataset) / args.batch_size) * (args.n_epochs)
args.warmup_steps = args.total_steps // 10
model = get_model(args)
optimizer = get_optimizer(model,None, args)
scheduler = get_scheduler(optimizer, args)
early_stopping_counter = 0
best_auc = -1
best_model = model # -1
for epoch in range(args.n_epochs):
print(f"model training...{epoch}||{early_stopping_counter}/{args.patience}")
### TRAIN
train_auc, train_acc, loss_avg = train(train_loader, model, optimizer, args, None)
### VALID
valid_auc, valid_acc, preds, targets = validate(valid_loader, model, args)
# wandb.log({"lr": optimizer.param_groups[0]['lr'], "train_loss": loss_avg, "train_auc": train_auc, "train_acc":train_acc,
# "valid_auc":valid_auc, "valid_acc":valid_acc})
### TODO: model save or early stopping
if valid_auc > best_auc:
best_auc = valid_auc
best_model = copy.deepcopy(model)
early_stopping_counter = 0
else :
early_stopping_counter += 1
if early_stopping_counter >= args.patience:
print(f'EarlyStopping counter: {early_stopping_counter} out of {args.patience}')
break
# scheduler
if args.scheduler == 'plateau':
scheduler.step(best_auc)
else:
scheduler.step()
return best_model
def evaluate(self, args, model, valid_data):
"""훈련된 모델과 validation 데이터셋을 제공하면 predict 반환"""
pin_memory = False
valset = DKTDataset(valid_data, args, False)
valid_loader = torch.utils.data.DataLoader(valset, shuffle=False,
batch_size=args.batch_size,
pin_memory=pin_memory,
collate_fn=collate)
auc, acc, preds, _ = validate(valid_loader, model, args)
return preds
def test(self, args, model, test_data):
return self.evaluate(args, model, test_data)
def get_target(self, datas):
targets = []
for data in datas:
targets.append(data[-1][-1])
return np.array(targets)
class PseudoLabel:
def __init__(self, trainer, args):
self.trainer = trainer
self.args = args
self.origin_model = None
self.model_path = os.path.join(args.model_dir, args.model_name)
if os.path.exists(self.model_path):
self.load_model()
# containers for storing results
self.models =[]
self.valid_aucs =[]
self.valid_accs =[]
def load_model(self):
model_path = os.path.join(self.args.model_dir, self.args.model_name)
print("Loading Model from:", model_path)
load_state = torch.load(model_path)
model = get_model(self.args)
# 1. load model state
model.load_state_dict(load_state['state_dict'], strict=True)
print("Loading Model from:", model_path, "...Finished.")
self.origin_model = model
def visualize(self):
aucs = self.valid_aucs
accs = self.valid_accs
N = len(aucs)
auc_min = min(aucs)
auc_max = max(aucs)
acc_min = min(accs)
acc_max = max(accs)
experiment = ['base'] + [f'pseudo {i + 1}' for i in range(N - 1)]
df = pd.DataFrame({'experiment': experiment, 'auc': aucs, 'acc': accs})
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots(figsize=(5 + N, 7))
ax1.set_title('AUC of Pseudo Label Training Process', fontsize=16)
# Time
plt.bar(df['experiment'],
df['auc'],
color='red',
width=-0.3, align='edge',
label='AUC')
plt.ylabel('AUC (Area Under the ROC Curve)')
ax1.set_ylim(auc_min - 0.002, auc_max + 0.002)
ax1.axhline(y=aucs[0], color='r', linewidth=1)
ax1.legend(loc=2)
# AUC
ax2 = ax1.twinx()
plt.bar(df['experiment'],
df['acc'],
color='blue',
width=0.3, align='edge',
label='ACC')
plt.ylabel('ACC (Accuracy)')
ax2.grid(False)
ax2.set_ylim(acc_min - 0.002, acc_max + 0.002)
ax2.axhline(y=accs[0], color='b', linewidth=1)
ax2.legend(loc=1)
plt.show()
def train(self, args, train_data, valid_data):
model = self.trainer.train(args, train_data, valid_data)
# store the model
self.models.append(model)
return model
def validate(self, args, model, valid_data):
valid_target = self.trainer.get_target(valid_data)
valid_predict = self.trainer.evaluate(args, model, valid_data)
# Metric
valid_auc, valid_acc = get_metric(valid_target, valid_predict)
# store auc / acc
self.valid_aucs.append(valid_auc)
self.valid_accs.append(valid_acc)
print(f'Valid AUC : {valid_auc} Valid ACC : {valid_acc}')
def test(self, args, model, test_data):
test_predict = self.trainer.test(args, model, test_data)
pseudo_labels = np.where(test_predict >= 0.5, 1, 0)
with open(args.write_path, 'w', encoding='utf8') as w:
print("writing prediction : {}".format(args.write_path))
w.write("id,prediction\n")
for id, p in enumerate(test_predict):
w.write('{},{}\n'.format(id,p))
return pseudo_labels
def update_train_data(self, pseudo_labels, train_data, test_data):
# copy of the test data that will hold the pseudo labels
pseudo_test_data = copy.deepcopy(test_data)
# write each pseudo label into the last answer slot of its test sequence
for pseudo_row, pseudo_label in zip(pseudo_test_data, pseudo_labels):
pseudo_row[-1][-1] = pseudo_label
# extend the train data with the pseudo-labeled test data
pseudo_train_data = np.concatenate((train_data, pseudo_test_data))
return pseudo_train_data
def run(self, N, args, train_data, valid_data, test_data):
"""
N is the number of times the second stage is repeated,
i.e. the number of training rounds that use pseudo labels.
"""
if N < 1:
raise ValueError(f"N must be at least 1, got {N}")
# preparation step for pseudo label training
print("Preparing for pseudo label process")
if self.origin_model:
model = self.origin_model
self.models.append(model)
else :
model = self.train(args, train_data, valid_data)
self.validate(args, model, valid_data)
args.write_path = f'/opt/ml/pseudo/output_0.csv'
pseudo_labels = self.test(args, model, test_data)
pseudo_train_data = self.update_train_data(pseudo_labels, train_data, test_data)
# repeat pseudo label training the desired number of times
for i in range(N):
print(f'Pseudo Label Training Process {i + 1}')
# seed
seed_everything(args.seed)
args.write_path = f'/opt/ml/pseudo/output_{i + 1}.csv'  # keep the preparation-stage output_0.csv intact
model = self.train(args, pseudo_train_data, valid_data)
self.validate(args, model, valid_data)
pseudo_labels = self.test(args, model, test_data)
pseudo_train_data = self.update_train_data(pseudo_labels, train_data, test_data)
# visualize the results
self.visualize()
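# A minimal, illustrative sketch of how the PseudoLabel wrapper above could be driven
# (the trainer object and the three data splits are assumed to come from the surrounding
# pipeline, and N=3 is an arbitrary example value, not part of the original script):
#
#   pseudo = PseudoLabel(trainer, args)
#   pseudo.run(N=3, args=args, train_data=train_data, valid_data=valid_data, test_data=test_data)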
def main(args):
# wandb.login()
setSeeds(args.seed)
args = add_features(args)
# args.cate_col_e = ["grade","KnowledgeTag","assessmentItemID","testId"]
# args.cate_col_d = []
# args.cont_col_e = ["ass_elp","ass_elp_o","ass_elp_x","prb_elp","prb_elp_o","prb_elp_x","test_mean","ass_mean","test_mean","prb_mean"]
# args.cont_col_d = ["elapsed"]
# args.n_cate_e = len(args.cate_col_e)
# args.n_cate_d = len(args.cate_col_d)
# args.n_cont_e = len(args.cont_col_e)
# args.n_cont_d = len(args.cont_col_d)
args.model_name = "pseudo.pt"
wandb.config.update(args)
preprocess = Preprocess(args)
preprocess.load_train_data(args.file_name)
preprocess.load_test_data(args.test_file_name, is_train_ = True)
data = preprocess.get_train_data()
train_data, valid_data = preprocess.split_data(data)
test_data = preprocess.get_test_data()
if args.sep_grade == True :
ddf = test_data[test_data['answerCode']==-1]
test2train_data = test_data[~test_data.set_index(['userID','grade']).index.isin(ddf.set_index(['userID','grade']).index)]
train_data = pd.concat([train_data, test2train_data])
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
import turtle
import pandas
screen = turtle.Screen()
screen.title("U.S. Naming Game")
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)
correct_answers = []
df = pandas.read_csv("50_states.csv")
import gzip
import os
import pandas as pd
from beta_rec.datasets.dataset_base import DatasetBase
from beta_rec.utils.constants import (
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
DEFAULT_TIMESTAMP_COL,
DEFAULT_USER_COL,
)
# Download URL.
AMAZON_Amazon_Instant_Video_URL = (
"http://snap.stanford.edu/data/amazon/productGraph/categoryFiles"
"/reviews_Amazon_Instant_Video.json.gz"
)
class AmazonInstantVideo(DatasetBase):
r"""AmazonInstantVideo.
Amazon Review dataset.
"""
def __init__(self, root_dir=None):
r"""Init AmazonInstantVideo Class."""
super().__init__(
dataset_name="amazon-amazon-instant-video",
root_dir=root_dir,
url=AMAZON_Amazon_Instant_Video_URL,
)
def preprocess(self):
"""Preprocess the raw file.
Preprocess the file downloaded via the url, convert it to a dataframe consist of the user-item interaction,
and save in the processed directory.
"""
file_name = os.path.join(self.raw_path, "amazon-amazon-instant-video.json.gz")
print(f"file_name: {file_name}")
if not os.path.exists(file_name):
self.download()
# parse json data
data = self.get_data_frame_from_gzip_file(file_name)
# rename columns
data = data.rename(
columns={
"reviewerID": DEFAULT_USER_COL,
"asin": DEFAULT_ITEM_COL,
"overall": DEFAULT_RATING_COL,
"unixReviewTime": DEFAULT_TIMESTAMP_COL,
}
)
# select necessary columns
data = pd.DataFrame(
data,
columns=[
DEFAULT_USER_COL,
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
DEFAULT_TIMESTAMP_COL,
],
)
self.save_dataframe_as_npz(
data,
os.path.join(self.processed_path, f"{self.dataset_name}_interaction.npz"),
)
def parse_gzip_file(self, path):
"""Parse gzip file.
Args:
path: the file path of gzip file.
"""
g = gzip.open(path, "rb")
for line in g:
yield eval(line)
def get_data_frame_from_gzip_file(self, path):
"""Get dataframe from a gzip file.
Args:
path: the file path of gzip file.
Returns:
A dataframe extracted from the gzip file.
"""
i = 0
df = {}
for d in self.parse_gzip_file(path):
df[i] = d
i += 1
return pd.DataFrame.from_dict(df, orient="index")
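# A minimal usage sketch for the class above (the root_dir value is an assumed placeholder,
# not part of the original module):
#
#   if __name__ == "__main__":
#       dataset = AmazonInstantVideo(root_dir="./datasets")
#       # downloads the raw file if it is missing, then writes the processed interaction .npz
#       dataset.preprocess()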
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import pandas as pd
import numpy as np
from tqdm import tqdm
import time
import logging
from sklearn.model_selection import StratifiedKFold
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
from keras.optimizers import Adam
import pandas as pd
from sklearn.metrics import mean_absolute_error, accuracy_score, f1_score
from keras.layers import *
from keras.models import Model
import keras.backend as K
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint
from keras.activations import softmax
learning_rate = 5e-5
min_learning_rate = 1e-5
batch_size =32
val_batch_size = 512
pred_batch_size = 512
percent_of_epoch = 0.25 * 0.05
num_epochs = 7 //percent_of_epoch
patience = 4
nfolds=5
model_path= "./model"
bert_path = "/home/mhxia/workspace/BDCI/chinese_wwm_ext_L-12_H-768_A-12/"
config_path = bert_path + 'bert_config.json'
checkpoint_path = bert_path + 'bert_model.ckpt'
dict_path = bert_path + 'vocab.txt'
MAX_LEN = 64
token_dict = {}
with open(dict_path, 'r', encoding='utf-8') as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
tokenizer = Tokenizer(token_dict)
train= pd.read_csv('./data/train_set.csv')
test=pd.read_csv('./data/dev_set.csv',sep='\t')
train_achievements = train['question1'].values
train_requirements = train['question2'].values
labels = train['label'].values
def label_process(x):
if x==0:
return [1,0]
else:
return [0,1]
train['label']=train['label'].apply(label_process)
labels_cat=list(train['label'].values)
labels_cat=np.array(labels_cat)
test_achievements = test['question1'].values
test_requirements = test['question2'].values
print(train.shape,test.shape)
def tokenize_data(X1, X2):
T,T_ = [], []
for i, _ in enumerate(X1):
achievements = X1[i]
requirements = X2[i]
t, t_ = tokenizer.encode(first=achievements, second=requirements, max_len=MAX_LEN)
T.append(t)
T_.append(t_)
T = np.array(T)
T_ = np.array(T_)
return T, T_
def apply_multiple(input_, layers):
if not len(layers) > 1:
raise ValueError('Layers list should contain more than 1 layer')
else:
agg_ = []
for layer in layers:
agg_.append(layer(input_))
out_ = Concatenate()(agg_)
return out_
def unchanged_shape(input_shape):
return input_shape
def substract(input_1, input_2):
neg_input_2 = Lambda(lambda x: -x, output_shape=unchanged_shape)(input_2)
out_ = Add()([input_1, neg_input_2])
return out_
def submult(input_1, input_2):
mult = Multiply()([input_1, input_2])
sub = substract(input_1, input_2)
out_ = Concatenate()([sub, mult])
return out_
def soft_attention_alignment(input_1, input_2):
attention = Dot(axes=-1)([input_1, input_2])
w_att_1 = Lambda(lambda x: softmax(x, axis=1), ##soft max to each column
output_shape=unchanged_shape)(attention)
w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2), ## axis =2 soft max to each row
output_shape=unchanged_shape)(attention))
in1_aligned = Dot(axes=1)([w_att_1, input_1])
in2_aligned = Dot(axes=1)([w_att_2, input_2])
return in1_aligned, in2_aligned
def focal_loss(y_true, y_pred, alpha=0.25, gamma=2.):
y_pred = K.clip(y_pred, 1e-8, 1 - 1e-8)
return - alpha * y_true * K.log(y_pred) * (1 - y_pred)**gamma\
- (1 - alpha) * (1 - y_true) * K.log(1 - y_pred) * y_pred**gamma
def get_model():
bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
# for l in bert_model.layers:
# l.trainable = True
T1 = Input(shape=(None,))
T2 = Input(shape=(None,))
tp1 = Lambda(lambda x: K.zeros_like(x))(T1)
tp2 = Lambda(lambda x: K.zeros_like(x))(T2)
x1 = bert_model([T1, tp1])
x2 = bert_model([T2, tp2])
X1 = Lambda(lambda x: x[:, 0:-1])(x1)
X2 = Lambda(lambda x: x[:, 0:-1])(x2)
encode = Bidirectional(LSTM(200, return_sequences=True))
q1_encoded = encode(X1)
q2_encoded = encode(X2)
q1_aligned, q2_aligned = soft_attention_alignment(q1_encoded, q2_encoded)
q1_combined = Concatenate()([q1_encoded, q2_aligned, submult(q1_encoded, q2_aligned)])
q2_combined = Concatenate()([q2_encoded, q1_aligned, submult(q2_encoded, q1_aligned)])
compose = Bidirectional(GRU(200, return_sequences=True))
q1_compare = compose(q1_combined)
q2_compare = compose(q2_combined)
# Aggregate
q1_rep = apply_multiple(q1_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
q2_rep = apply_multiple(q2_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
# Classifier
merged = Concatenate()([q1_rep, q2_rep])
dense = BatchNormalization()(merged)
dense = Dense(30, activation='selu')(dense)
dense = BatchNormalization()(dense)
output = Dense(2, activation='softmax')(dense)
model = Model([T1, T2], output)
model.compile(
# loss='categorical_crossentropy',
loss=focal_loss,
optimizer=Adam(1e-3),  # use a sufficiently small learning rate
metrics=['accuracy']
)
model.summary()
return model
skf = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=42)
oof_train = np.zeros((len(train), 2), dtype=np.float32)
oof_test = np.zeros((len(test), 2), dtype=np.float32)
for fold, (train_index, valid_index) in enumerate(skf.split(train_achievements, labels)):
x1 = train_achievements[train_index]
x2 = train_requirements[train_index]
x1_token, x2_token = tokenize_data(x1, x2)
y = labels_cat[train_index]
val_x1 = train_achievements[valid_index]
val_x2 = train_requirements[valid_index]
val_x1_token, val_x2_token = tokenize_data(val_x1, val_x2)
val_y = labels_cat[valid_index]
early_stopping = EarlyStopping(monitor='val_accuracy', patience=patience, verbose=1)
model_checkpoint = ModelCheckpoint(model_path+"model_%s.w"%fold, monitor='val_accuracy', verbose=1,save_best_only=True, save_weights_only=False, mode='auto')
model = get_model()
model.fit(x=[x1_token, x2_token], y=y,
validation_data= ([val_x1_token, val_x2_token],val_y),
batch_size=batch_size,
epochs=num_epochs,
# steps_per_epoch= (len(x1)+ batch_size -1) // batch_size * percent_of_epoch,
# validation_steps = (len(val_x1)+ batch_size -1) // batch_size * percent_of_epoch ,
verbose=1,
callbacks=[early_stopping, model_checkpoint]
)
# model.load_weights('bert{}.w'.format(fold))
test_x1, test_x2 = tokenize_data(test_achievements, test_requirements)
oof_test += model.predict((test_x1, test_x2), batch_size=pred_batch_size)
K.clear_session()
oof_test /= nfolds
test=pd.DataFrame(oof_test)
test.to_csv('test_pred.csv',index=False)
test.head(),test.shape
train=pd.DataFrame(oof_train)
train.to_csv('train_pred.csv',index=False)
pred = pd.read_csv('test_pred.csv')
# %%
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import random
from scipy.optimize import curve_fit
import scipy.stats as stats
from numba import njit, prange
import statsmodels.api as sm
import math
import itertools
import operator
import sys
sys.path.append('../shared')
from wednesdaySPEED import simulation
from analytic_tools import gen_hurst_exponent, count_crashes
from original_implementation import execute
# %%
'''
The following cells are used to generate graphs for the Hurst Exponent
analysis
'''
res = np.zeros((20, 30))
for z in range(1):
G,P,N,S,X,D,T,U,C, initial_account_balance = simulation(trigger = False, bound = True, pd = 0.05, pe = 0.01,
ph = 0.0485, pa = 0.7, N0=1000, N1 = 100, A = 4, a=1, h=1,
pi1 = 0.5, pi2 = 0.3, pi3 = 0.2)
h_res, q_vals = gen_hurst_exponent(S, 30)
res[z,:] = h_res*q_vals
res_mean_ca = np.mean(res, axis=0)
res_std_ca = np.std(res, axis=0)
#%%
'''
loading data for S&P
'''
df = pd.read_csv("../../data/all_world_indices_clean.csv")
print(df.columns)
df_spx = df[["Date", "SPX Index"]]
df_spx["Date"] = pd.to_datetime(df_spx["Date"], format='%d/%m/%Y')
df_spx = df_spx.sort_values(by="Date")
df_spx.reset_index(inplace=True)
series_array = np.array(df_spx["SPX Index"])
## identical to np.split but doesnt raise exception if arrays not equal length
split = np.array_split(series_array, 6)
res = np.zeros((6, 30))
for i in range(len(split)):
h_res, q_vals = gen_hurst_exponent(split[i], 30)
res[i,:] = h_res*q_vals
res_mean_sp = np.mean(res, axis=0)
res_std_sp = np.std(res, axis=0)
#%%
'''
loading data for the NKY
'''
df = pd.read_csv("../../data/all_world_indices_clean.csv")
df_nky = df[["Date", "NKY Index"]]
df_nky["Date"] = | pd.to_datetime(df_nky["Date"], format='%d/%m/%Y') | pandas.to_datetime |
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
import datetime
from pandas.api.types import is_numeric_dtype
import timeserio.ini as ini
from timeserio.data.mock import mock_fit_data
from timeserio.preprocessing import PandasDateTimeFeaturizer
from timeserio.preprocessing.datetime import (
get_fractional_day_from_series, get_fractional_hour_from_series,
get_fractional_year_from_series, truncate_series,
get_zero_indexed_month_from_series, get_time_is_in_interval_from_series,
get_is_holiday_from_series
)
datetime_column = ini.Columns.datetime
seq_column = f'seq_{ini.Columns.datetime}'
usage_column = ini.Columns.target
@pytest.fixture
def df():
return mock_fit_data(start_date=datetime.datetime(2017, 1, 1, 1, 0))
@pytest.fixture
def featurizer():
return PandasDateTimeFeaturizer()
def test_get_fractional_hour_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='0.5H', periods=48)
)
fractionalhour = get_fractional_hour_from_series(series)
expected = pd.Series(np.linspace(0, 23.5, 48))
pdt.assert_series_equal(fractionalhour, expected)
def test_get_fractional_day_from_series():
series = pd.Series(pd.date_range(start='2000-01-01', freq='6H', periods=5))
fractional_day = get_fractional_day_from_series(series)
expected = pd.Series([0, 0.25, 0.5, 0.75, 0])
pdt.assert_series_equal(fractional_day, expected)
def test_get_fractional_year_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='31D', periods=5)
)
fractional_year = get_fractional_year_from_series(series)
expected = pd.Series([0, 1, 2, 3, 4]) * 31 / 365.
pdt.assert_series_equal(fractional_year, expected)
def test_get_is_holiday_from_series():
series = pd.Series(pd.date_range(start='2000-01-01', freq='D', periods=5))
is_holiday = get_is_holiday_from_series(series)
expected = pd.Series([1, 1, 1, 1, 0])
pdt.assert_series_equal(is_holiday, expected)
@pytest.mark.parametrize(
"country, expected",
[("England", [1, 0, 0, 1]), ("Scotland", [1, 1, 1, 0])]
)
def test_get_is_holiday_from_series_with_country(country, expected):
dates = ["2020-01-01", "2020-01-02", "2020-08-03", "2020-08-31"]
series = pd.to_datetime(pd.Series(dates))
is_holiday = get_is_holiday_from_series(series, country=country)
pdt.assert_series_equal(is_holiday, pd.Series(expected))
def test_get_zero_indexed_month_from_series():
series = pd.Series(
pd.date_range(start='2000-01-01', freq='1M', periods=12)
)
month0 = get_zero_indexed_month_from_series(series)
expected = pd.Series(range(12))
pdt.assert_series_equal(month0, expected)
@pytest.mark.parametrize(
'series_data, truncation_period, expected_data',
[
([pd.Timestamp(2019, 1, 1, 1, 9)], 'H', [pd.Timestamp(2019, 1, 1, 1)]),
([pd.Timestamp(2019, 1, 2, 1)], 'D', [pd.Timestamp(2019, 1, 2)]),
# coding: utf-8
# # CareerCon 2019 - Help Navigate Robots
# ## Robots are smart… by design !!
#
# 
#
# ---
#
# Robots are smart… by design. To fully understand and properly navigate a task, however, they need input about their environment.
#
# In this competition, you’ll help robots recognize the floor surface they’re standing on using data collected from Inertial Measurement Units (IMU sensors).
#
# We’ve collected IMU sensor data while driving a small mobile robot over different floor surfaces on the university premises. The task is to predict which one of the nine floor types (carpet, tiles, concrete) the robot is on using sensor data such as acceleration and velocity. Succeed and you'll help improve the navigation of robots without assistance across many different surfaces, so they won’t fall down on the job.
#
# ### It's a golden chance to help humanity, by helping Robots!
#
# <br>
# <img src="https://media2.giphy.com/media/EizPK3InQbrNK/giphy.gif" border="1" width="400" height="300">
# <br>
# # DATA
# **X_[train/test].csv** - the input data, covering 10 sensor channels and 128 measurements per time series plus three ID columns:
#
# - ```row_id```: The ID for this row.
#
# - ```series_id```: ID number for the measurement series. Foreign key to y_train/sample_submission.
#
# - ```measurement_number```: Measurement number within the series.
#
# The orientation channels encode how the robot is currently oriented, as a quaternion (see Wikipedia). Angular velocity describes the angle and speed of motion, and linear acceleration components describe how the speed is changing at different times. The 10 sensor channels are:
#
# ```
# orientation_X
#
# orientation_Y
#
# orientation_Z
#
# orientation_W
#
# angular_velocity_X
#
# angular_velocity_Y
#
# angular_velocity_Z
#
# linear_acceleration_X
#
# linear_acceleration_Y
#
# linear_acceleration_Z
# ```
#
# **y_train.csv** - the surfaces for training set.
#
# - ```series_id```: ID number for the measurement series.
#
# - ```group_id```: ID number for all of the measurements taken in a recording session. Provided for the training set only, to enable more cross validation strategies.
#
# - ```surface```: the target for this competition.
#
# **sample_submission.csv** - a sample submission file in the correct format.
# ### Load packages
# In[1]:
import numpy as np
import pandas as pd
import os
from time import time
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from matplotlib import rcParams
get_ipython().run_line_magic('matplotlib', 'inline')
le = preprocessing.LabelEncoder()
from numba import jit
import itertools
from seaborn import countplot,lineplot, barplot
from numba import jit
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn import preprocessing
from scipy.stats import randint as sp_randint
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
import matplotlib.style as style
style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
import gc
gc.enable()
get_ipython().system('ls ../input/')
get_ipython().system('ls ../input/robots-best-submission')
print ("Ready !")
# ### Load data
# In[2]:
data = pd.read_csv('../input/career-con-2019/X_train.csv')
tr = pd.read_csv('../input/career-con-2019/X_train.csv')
sub = pd.read_csv('../input/career-con-2019/sample_submission.csv')
test = pd.read_csv('../input/career-con-2019/X_test.csv')
target = pd.read_csv('../input/career-con-2019/y_train.csv')
print ("Data is ready !!")
# # Data exploration
# In[3]:
data.head()
# In[4]:
test.head()
# In[5]:
target.head()
# In[6]:
len(data.measurement_number.value_counts())
# Each series has 128 measurements.
#
# **1 series = 128 measurements**.
#
# For example, the series with series_id=0 has surface = *fine_concrete* and 128 measurements.
# ### describe (basic stats)
# In[7]:
data.describe()
# In[8]:
test.describe()
# In[9]:
target.describe()
# ### There is missing data in test and train data
# In[10]:
totalt = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([totalt, percent], axis=1, keys=['Total', 'Percent'])
print ("Missing Data at Training")
missing_data.tail()
# In[11]:
totalt = test.isnull().sum().sort_values(ascending=False)
percent = (test.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([totalt, percent], axis=1, keys=['Total', 'Percent'])
print ("Missing Data at Test")
missing_data.tail()
# In[12]:
print ("Test has ", (test.shape[0]-data.shape[0])/128, "series more than Train (later I will prove it) = 768 registers")
dif = test.shape[0]-data.shape[0]
print ("Let's check this extra 6 series")
test.tail(768).describe()
# If we look at the features orientation, angular velocity and linear acceleration, we can see big differences between the **max** and **min** of the entire test set and those of the 6 extra test series (see **linear_acceleration_Z**).
#
# Obviously we are comparing 3810 series vs 6 series so this is not a big deal.
# ### goup_id will be important !!
# In[13]:
target.groupby('group_id').surface.nunique().max()
# In[14]:
target['group_id'].nunique()
# **73 groups**
# **Each group_id is a unique recording session and has only one surface type**
# In[15]:
sns.set(style='darkgrid')
sns.countplot(y = 'surface',
data = target,
order = target['surface'].value_counts().index)
plt.show()
# ### Target feature - surface and group_id distribution
# Let's show now the distribution of target feature - surface and group_id.
# by @gpreda.
# In[16]:
fig, ax = plt.subplots(1,1,figsize=(26,8))
tmp = pd.DataFrame(target.groupby(['group_id', 'surface'])['series_id'].count().reset_index())
m = tmp.pivot(index='surface', columns='group_id', values='series_id')
s = sns.heatmap(m, linewidths=.1, linecolor='black', annot=True, cmap="YlGnBu")
s.set_title('Number of surface category per group_id', size=16)
plt.show()
# We need to classify on which surface our robot is standing.
#
# Multi-class classification
#
# 9 classes (surface)
# In[17]:
plt.figure(figsize=(23,5))
sns.set(style="darkgrid")
countplot(x="group_id", data=target, order = target['group_id'].value_counts().index)
plt.show()
# **So, we have 3810 train series, and 3816 test series.
# Let's engineer some features!**
#
# ## Example: Series 1
#
# Let's have a look at the values of features in a single time-series, for example series 1 ```series_id=0```
#
# Click to see all measurements of the **first series**
# In[18]:
serie1 = tr.head(128)
serie1.head()
# In[19]:
serie1.describe()
# In[20]:
plt.figure(figsize=(26, 16))
for i, col in enumerate(serie1.columns[3:]):
plt.subplot(3, 4, i + 1)
plt.plot(serie1[col])
plt.title(col)
# In this example, we can see quite interesting behavior:
# 1. Orientation X increases
# 2. Orientation Y decreases
# 3. We don't see any kind of pattern except for linear_acceleration_Y
#
# And we know that in this series, the robot moved through "fine_concrete".
# In[21]:
target.head(1)
# In[22]:
del serie1
gc.collect()
# ## Visualizing Series
#
# Before, I showed you as an example the series 1.
#
# **This code allows you to visualize any series.**
#
# From: *Code Snippet For Visualizing Series Id by @shaz13*
# In[23]:
series_dict = {}
for series in (data['series_id'].unique()):
series_dict[series] = data[data['series_id'] == series]
# In[24]:
def plotSeries(series_id):
style.use('ggplot')
plt.figure(figsize=(28, 16))
print(target[target['series_id'] == series_id]['surface'].values[0].title())
for i, col in enumerate(series_dict[series_id].columns[3:]):
if col.startswith("o"):
color = 'red'
elif col.startswith("a"):
color = 'green'
else:
color = 'blue'
if i >= 7:
i+=1
plt.subplot(3, 4, i + 1)
plt.plot(series_dict[series_id][col], color=color, linewidth=3)
plt.title(col)
# **Now, let's see the code for series 15 (it is an example, try whatever you want)**
# In[25]:
id_series = 15
plotSeries(id_series)
# In[26]:
del series_dict
gc.collect()
# <br>
# ### Correlations (Part I)
# In[27]:
f,ax = plt.subplots(figsize=(8, 8))
sns.heatmap(tr.iloc[:,3:].corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
# **Correlations test (click "code")**
# In[28]:
f,ax = plt.subplots(figsize=(8, 8))
sns.heatmap(test.iloc[:,3:].corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
# Well, this is important, there is a **strong correlation** between:
# - angular_velocity_Z and angular_velocity_Y
# - orientation_X and orientation_Y
# - orientation_Y and orientation_Z
#
# Moreover, test has different correlations than training, for example:
#
# - angular_velocity_Z and orientation_X: -0.1 (training) and 0.1 (test). Anyway, it is too small in both cases, so it should not be a problem.
# ## Fourier Analysis
#
# My hope was, that different surface types yield (visible) differences in the frequency spectrum of the sensor measurements.
#
# Machine learning techniques might learn frequency filters on their own, but why don't give the machine a little head start? So I computed the the cyclic FFT for the angular velocity and linear acceleration sensors and plotted mean and standard deviation of the absolute values of the frequency components per training surface category (leaving out the frequency 0 (i.e. constants like sensor bias, earth gravity, ...).
#
# The sensors show some different frequency characteristics (see plots below), but unfortunately the surface categories all have similar (to the human eye) shapes, varying mostly in total power, and the standard deviations are high (compared to differences in the means). So there are no nice strong characteristic peaks for surface types. But that does not mean that there is nothing detectable by more sophisticated statistical methods.
#
# This article http://www.kaggle.com/christoffer/establishing-sampling-frequency makes a convincing case that the sampling frequency is around 400 Hz, so according to that you would see the frequency range of 3-200 Hz in the diagrams (and aliased higher frequencies).
#
# by [@trohwer64](https://www.kaggle.com/trohwer64)
# In[29]:
get_ipython().system('ls ../input')
# In[30]:
train_x = pd.read_csv('../input/career-con-2019/X_train.csv')
train_y = pd.read_csv('../input/career-con-2019/y_train.csv')
# In[31]:
import math
def prepare_data(t):
def f(d):
d=d.sort_values(by=['measurement_number'])
return pd.DataFrame({
'lx':[ d['linear_acceleration_X'].values ],
'ly':[ d['linear_acceleration_Y'].values ],
'lz':[ d['linear_acceleration_Z'].values ],
'ax':[ d['angular_velocity_X'].values ],
'ay':[ d['angular_velocity_Y'].values ],
'az':[ d['angular_velocity_Z'].values ],
})
t= t.groupby('series_id').apply(f)
def mfft(x):
return [ x/math.sqrt(128.0) for x in np.absolute(np.fft.fft(x)) ][1:65]
t['lx_f']=[ mfft(x) for x in t['lx'].values ]
t['ly_f']=[ mfft(x) for x in t['ly'].values ]
t['lz_f']=[ mfft(x) for x in t['lz'].values ]
t['ax_f']=[ mfft(x) for x in t['ax'].values ]
t['ay_f']=[ mfft(x) for x in t['ay'].values ]
t['az_f']=[ mfft(x) for x in t['az'].values ]
return t
# In[32]:
t=prepare_data(train_x)
t=pd.merge(t,train_y[['series_id','surface','group_id']],on='series_id')
t=t.rename(columns={"surface": "y"})
# In[33]:
def aggf(d, feature):
va= np.array(d[feature].tolist())
mean= sum(va)/va.shape[0]
var= sum([ (va[i,:]-mean)**2 for i in range(va.shape[0]) ])/va.shape[0]
dev= [ math.sqrt(x) for x in var ]
return pd.DataFrame({
'mean': [ mean ],
'dev' : [ dev ],
})
display={
'hard_tiles_large_space':'r-.',
'concrete':'g-.',
'tiled':'b-.',
'fine_concrete':'r-',
'wood':'g-',
'carpet':'b-',
'soft_pvc':'y-',
'hard_tiles':'r--',
'soft_tiles':'g--',
}
# In[34]:
import matplotlib.pyplot as plt
plt.figure(figsize=(14, 8*7))
#plt.margins(x=0.0, y=0.0)
#plt.tight_layout()
# plt.figure()
features=['lx_f','ly_f','lz_f','ax_f','ay_f','az_f']
count=0
for feature in features:
stat= t.groupby('y').apply(aggf,feature)
stat.index= stat.index.droplevel(-1)
b=[*range(len(stat.at['carpet','mean']))]
count+=1
plt.subplot(len(features)+1,1,count)
for i,(k,v) in enumerate(display.items()):
plt.plot(b, stat.at[k,'mean'], v, label=k)
# plt.errorbar(b, stat.at[k,'mean'], yerr=stat.at[k,'dev'], fmt=v)
leg = plt.legend(loc='best', ncol=3, mode="expand", shadow=True, fancybox=True)
plt.title("sensor: " + feature)
plt.xlabel("frequency component")
plt.ylabel("amplitude")
count+=1
plt.subplot(len(features)+1,1,count)
k='concrete'
v=display[k]
feature='lz_f'
stat= t.groupby('y').apply(aggf,feature)
stat.index= stat.index.droplevel(-1)
b=[*range(len(stat.at['carpet','mean']))]
plt.errorbar(b, stat.at[k,'mean'], yerr=stat.at[k,'dev'], fmt=v)
plt.title("sample for error bars (lz_f, surface concrete)")
plt.xlabel("frequency component")
plt.ylabel("amplitude")
plt.show()
# In[35]:
del train_x, train_y
gc.collect()
# ## Is it a Humanoid Robot instead of a car?
#
# 
#
# **Acceleration**
# - X (mean at 0)
# - Y axis is centered at a value which shows us the movement (straight).
# - Z axis is centered at 10 (+- 9.8), which is gravity !! You can see how the robot bounces.
#
# Angular velocity (X,Y,Z) has mean (0,0,0), so there is no net rotation about those axes (measured with an encoder or potentiometer)
#
# **Fourier**
#
# At a frequency of about 3 Hz we can see an acceleration peak; I think that acceleration represents one step.
# Maybe we can suppose that every step is caused by many different movements; that's why there are different accelerations at different frequencies.
#
# Angular velocity represents spins.
# Every time the engine/servo spins, the robot takes a step - hence the relation between acceleration and angular velocity.
# ---
#
# # Feature Engineering
# In[36]:
def plot_feature_distribution(df1, df2, label1, label2, features,a=2,b=5):
i = 0
sns.set_style('whitegrid')
plt.figure()
fig, ax = plt.subplots(a,b,figsize=(17,9))
for feature in features:
i += 1
plt.subplot(a,b,i)
sns.kdeplot(df1[feature], bw=0.5,label=label1)
sns.kdeplot(df2[feature], bw=0.5,label=label2)
plt.xlabel(feature, fontsize=9)
locs, labels = plt.xticks()
plt.tick_params(axis='x', which='major', labelsize=8)
plt.tick_params(axis='y', which='major', labelsize=8)
plt.show();
# In[37]:
features = data.columns.values[3:]
plot_feature_distribution(data, test, 'train', 'test', features)
# Good news, our basic features have the **same distribution (Normal) on test and training**. There are some differences between *orientation_X*, *orientation_Y* and *linear_acceleration_Y*.
#
# I will try **StandardScaler** to fix this, and remember: orientation, angular velocity and linear acceleration are measured in different units, so scaling might be a good choice.
# In[38]:
def plot_feature_class_distribution(classes,tt, features,a=5,b=2):
i = 0
sns.set_style('whitegrid')
plt.figure()
fig, ax = plt.subplots(a,b,figsize=(16,24))
for feature in features:
i += 1
plt.subplot(a,b,i)
for clas in classes:
ttc = tt[tt['surface']==clas]
sns.kdeplot(ttc[feature], bw=0.5,label=clas)
plt.xlabel(feature, fontsize=9)
locs, labels = plt.xticks()
plt.tick_params(axis='x', which='major', labelsize=8)
plt.tick_params(axis='y', which='major', labelsize=8)
plt.show();
# In[39]:
classes = (target['surface'].value_counts()).index
aux = data.merge(target, on='series_id', how='inner')
plot_feature_class_distribution(classes, aux, features)
# **Normal distribution**
#
# There are obviously differences between *surfaces* and that's good, we will focus on that in order to classify them better.
#
# Knowing these differences, and that the variables follow a normal distribution (in most cases), we need to add new features like ```mean, std, median, range ...``` (for each variable).
#
# However, I will try to fix *orientation_X* and *orientation_Y* as I explained before, by scaling and normalizing the data (a quick sketch follows below).
#
# ---
#
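# A minimal sketch of the scaling idea mentioned above (illustrative only: the original
# kernel never actually applies StandardScaler, and `scaled_data` / `scaled_test` are
# hypothetical names that are not used later in this notebook).

# In[ ]:

scaler = StandardScaler()
sensor_cols = data.columns[3:13]
scaled_data, scaled_test = data.copy(), test.copy()
scaled_data[sensor_cols] = scaler.fit_transform(data[sensor_cols])
scaled_test[sensor_cols] = scaler.transform(test[sensor_cols])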
# ### Now with a new scale (more precision)
# In[40]:
plt.figure(figsize=(26, 16))
for i,col in enumerate(aux.columns[3:13]):
ax = plt.subplot(3,4,i+1)
ax = plt.title(col)
for surface in classes:
surface_feature = aux[aux['surface'] == surface]
sns.kdeplot(surface_feature[col], label = surface)
# ### Histogram for main features
# In[41]:
plt.figure(figsize=(26, 16))
for i, col in enumerate(data.columns[3:]):
ax = plt.subplot(3, 4, i + 1)
sns.distplot(data[col], bins=100, label='train')
sns.distplot(test[col], bins=100, label='test')
ax.legend()
# ## Step 0 : quaternions
# Orientation - quaternion coordinates
# You could notice that there are 4 coordinates: X, Y, Z, W.
#
# Usually we have X, Y, Z - Euler Angles. But Euler Angles are limited by a phenomenon called "gimbal lock," which prevents them from measuring orientation when the pitch angle approaches +/- 90 degrees. Quaternions provide an alternative measurement technique that does not suffer from gimbal lock. Quaternions are less intuitive than Euler Angles and the math can be a little more complicated.
#
# Here are some articles about it:
#
# http://www.chrobotics.com/library/understanding-quaternions
#
# http://www.tobynorris.com/work/prog/csharp/quatview/help/orientations_and_quaternions.htm
#
# Basically 3D coordinates are converted to 4D vectors.
# In[42]:
# https://stackoverflow.com/questions/53033620/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr?rq=1
def quaternion_to_euler(x, y, z, w):
import math
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
X = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
Y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
Z = math.atan2(t3, t4)
return X, Y, Z
# In[43]:
def fe_step0 (actual):
# https://www.mathworks.com/help/aeroblks/quaternionnorm.html
# https://www.mathworks.com/help/aeroblks/quaternionmodulus.html
# https://www.mathworks.com/help/aeroblks/quaternionnormalize.html
# Spoiler: you don't need this ;)
actual['norm_quat'] = (actual['orientation_X']**2 + actual['orientation_Y']**2 + actual['orientation_Z']**2 + actual['orientation_W']**2)
actual['mod_quat'] = (actual['norm_quat'])**0.5
actual['norm_X'] = actual['orientation_X'] / actual['mod_quat']
actual['norm_Y'] = actual['orientation_Y'] / actual['mod_quat']
actual['norm_Z'] = actual['orientation_Z'] / actual['mod_quat']
actual['norm_W'] = actual['orientation_W'] / actual['mod_quat']
return actual
#
# > *Are there any reasons to not automatically normalize a quaternion? And if there are, what quaternion operations do result in non-normalized quaternions?*
#
# Any operation that produces a quaternion will need to be normalized because floating-point precision errors will cause it to not be unit length.
# I would advise against standard routines performing normalization automatically for performance reasons.
# Any competent programmer should be aware of the precision issues and be able to normalize the quantities when necessary - and it is not always necessary to have a unit length quaternion.
# The same is true for vector operations.
#
# source: https://stackoverflow.com/questions/11667783/quaternion-and-normalization
# In[44]:
data = fe_step0(data)
test = fe_step0(test)
print(data.shape)
data.head()
# In[45]:
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(18, 5))
ax1.set_title('quaternion X')
sns.kdeplot(data['norm_X'], ax=ax1, label="train")
sns.kdeplot(test['norm_X'], ax=ax1, label="test")
ax2.set_title('quaternion Y')
sns.kdeplot(data['norm_Y'], ax=ax2, label="train")
sns.kdeplot(test['norm_Y'], ax=ax2, label="test")
ax3.set_title('quaternion Z')
sns.kdeplot(data['norm_Z'], ax=ax3, label="train")
sns.kdeplot(test['norm_Z'], ax=ax3, label="test")
ax4.set_title('quaternion W')
sns.kdeplot(data['norm_W'], ax=ax4, label="train")
sns.kdeplot(test['norm_W'], ax=ax4, label="test")
plt.show()
# ## Step 1: (x, y, z, w) -> (x,y,z) quaternions to euler angles
# In[46]:
def fe_step1 (actual):
"""Quaternions to Euler Angles"""
x, y, z, w = actual['norm_X'].tolist(), actual['norm_Y'].tolist(), actual['norm_Z'].tolist(), actual['norm_W'].tolist()
nx, ny, nz = [], [], []
for i in range(len(x)):
xx, yy, zz = quaternion_to_euler(x[i], y[i], z[i], w[i])
nx.append(xx)
ny.append(yy)
nz.append(zz)
actual['euler_x'] = nx
actual['euler_y'] = ny
actual['euler_z'] = nz
return actual
# In[47]:
data = fe_step1(data)
test = fe_step1(test)
print (data.shape)
data.head()
# 
# In[48]:
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(15, 5))
ax1.set_title('Roll')
sns.kdeplot(data['euler_x'], ax=ax1, label="train")
sns.kdeplot(test['euler_x'], ax=ax1, label="test")
ax2.set_title('Pitch')
sns.kdeplot(data['euler_y'], ax=ax2, label="train")
sns.kdeplot(test['euler_y'], ax=ax2, label="test")
ax3.set_title('Yaw')
sns.kdeplot(data['euler_z'], ax=ax3, label="train")
sns.kdeplot(test['euler_z'], ax=ax3, label="test")
plt.show()
# **Euler angles** are really important, and we have a problem with Z.
#
# ### Why is Orientation_Z (Euler angle Z) so important?
#
# We have a robot moving around; imagine a robot moving straight across different surfaces (each with different features), for example concrete and hard tile floor. Our robot can **bounce** or **balance** itself a little bit if the surface is not flat and smooth; that's why we need to work with quaternions and take care of orientation_Z.
#
# 
# In[49]:
data.head()
# ## Step 2: + Basic features
# In[50]:
def feat_eng(data):
df = pd.DataFrame()
data['totl_anglr_vel'] = (data['angular_velocity_X']**2 + data['angular_velocity_Y']**2 + data['angular_velocity_Z']**2)** 0.5
data['totl_linr_acc'] = (data['linear_acceleration_X']**2 + data['linear_acceleration_Y']**2 + data['linear_acceleration_Z']**2)**0.5
data['totl_xyz'] = (data['orientation_X']**2 + data['orientation_Y']**2 + data['orientation_Z']**2)**0.5
data['acc_vs_vel'] = data['totl_linr_acc'] / data['totl_anglr_vel']
def mean_change_of_abs_change(x):
return np.mean(np.diff(np.abs(np.diff(x))))
for col in data.columns:
if col in ['row_id','series_id','measurement_number']:
continue
df[col + '_mean'] = data.groupby(['series_id'])[col].mean()
df[col + '_median'] = data.groupby(['series_id'])[col].median()
df[col + '_max'] = data.groupby(['series_id'])[col].max()
df[col + '_min'] = data.groupby(['series_id'])[col].min()
df[col + '_std'] = data.groupby(['series_id'])[col].std()
df[col + '_range'] = df[col + '_max'] - df[col + '_min']
df[col + '_maxtoMin'] = df[col + '_max'] / df[col + '_min']
df[col + '_mean_abs_chg'] = data.groupby(['series_id'])[col].apply(lambda x: np.mean(np.abs(np.diff(x))))
df[col + '_mean_change_of_abs_change'] = data.groupby('series_id')[col].apply(mean_change_of_abs_change)
df[col + '_abs_max'] = data.groupby(['series_id'])[col].apply(lambda x: np.max(np.abs(x)))
df[col + '_abs_min'] = data.groupby(['series_id'])[col].apply(lambda x: np.min(np.abs(x)))
df[col + '_abs_avg'] = (df[col + '_abs_min'] + df[col + '_abs_max'])/2
return df
# In[51]:
get_ipython().run_cell_magic('time', '', 'data = feat_eng(data)\ntest = feat_eng(test)\nprint ("New features: ",data.shape)')
# In[52]:
data.head()
# ## New advanced features
# **Useful functions**
# In[53]:
from scipy.stats import kurtosis
from scipy.stats import skew
def _kurtosis(x):
return kurtosis(x)
def CPT5(x):
den = len(x)*np.exp(np.std(x))
return sum(np.exp(x))/den
def skewness(x):
return skew(x)
def SSC(x):
x = np.array(x)
x = np.append(x[-1], x)
x = np.append(x,x[1])
xn = x[1:len(x)-1]
xn_i2 = x[2:len(x)] # xn+1
xn_i1 = x[0:len(x)-2] # xn-1
ans = np.heaviside((xn-xn_i1)*(xn-xn_i2),0)
return sum(ans[1:])
def wave_length(x):
x = np.array(x)
x = np.append(x[-1], x)
x = np.append(x,x[1])
xn = x[1:len(x)-1]
xn_i2 = x[2:len(x)] # xn+1
return sum(abs(xn_i2-xn))
def norm_entropy(x):
tresh = 3
return sum(np.power(abs(x),tresh))
def SRAV(x):
SRA = sum(np.sqrt(abs(x)))
return np.power(SRA/len(x),2)
def mean_abs(x):
return sum(abs(x))/len(x)
def zero_crossing(x):
x = np.array(x)
x = np.append(x[-1], x)
x = np.append(x,x[1])
xn = x[1:len(x)-1]
xn_i2 = x[2:len(x)] # xn+1
return sum(np.heaviside(-xn*xn_i2,0))
# This advanced features based on robust statistics.
# In[54]:
def fe_advanced_stats(data):
df = pd.DataFrame()
for col in data.columns:
if col in ['row_id','series_id','measurement_number']:
continue
if 'orientation' in col:
continue
print ("FE on column ", col, "...")
df[col + '_skew'] = data.groupby(['series_id'])[col].skew()
df[col + '_mad'] = data.groupby(['series_id'])[col].mad()
df[col + '_q25'] = data.groupby(['series_id'])[col].quantile(0.25)
df[col + '_q75'] = data.groupby(['series_id'])[col].quantile(0.75)
df[col + '_q95'] = data.groupby(['series_id'])[col].quantile(0.95)
df[col + '_iqr'] = df[col + '_q75'] - df[col + '_q25']
df[col + '_CPT5'] = data.groupby(['series_id'])[col].apply(CPT5)
df[col + '_SSC'] = data.groupby(['series_id'])[col].apply(SSC)
df[col + '_skewness'] = data.groupby(['series_id'])[col].apply(skewness)
df[col + '_wave_lenght'] = data.groupby(['series_id'])[col].apply(wave_length)
df[col + '_norm_entropy'] = data.groupby(['series_id'])[col].apply(norm_entropy)
df[col + '_SRAV'] = data.groupby(['series_id'])[col].apply(SRAV)
df[col + '_kurtosis'] = data.groupby(['series_id'])[col].apply(_kurtosis)
df[col + '_zero_crossing'] = data.groupby(['series_id'])[col].apply(zero_crossing)
return df
# - Frequency of the max value
# - Frequency of the min value
# - Count Positive values
# - Count Negative values
# - Count zeros
# In[55]:
basic_fe = ['linear_acceleration_X','linear_acceleration_Y','linear_acceleration_Z',
'angular_velocity_X','angular_velocity_Y','angular_velocity_Z']
# In[56]:
def fe_plus (data):
aux = pd.DataFrame()
for serie in data.index:
#if serie%500 == 0: print ("> Serie = ",serie)
aux = X_train[X_train['series_id']==serie]
for col in basic_fe:
data.loc[serie,col + '_unq'] = aux[col].round(3).nunique()
data.loc[serie,col + 'ratio_unq'] = aux[col].round(3).nunique()/18
try:
data.loc[serie,col + '_freq'] = aux[col].value_counts().idxmax()
except:
data.loc[serie,col + '_freq'] = 0
data.loc[serie,col + '_max_freq'] = aux[aux[col] == aux[col].max()].shape[0]
data.loc[serie,col + '_min_freq'] = aux[aux[col] == aux[col].min()].shape[0]
data.loc[serie,col + '_pos_freq'] = aux[aux[col] >= 0].shape[0]
data.loc[serie,col + '_neg_freq'] = aux[aux[col] < 0].shape[0]
data.loc[serie,col + '_nzeros'] = (aux[col]==0).sum(axis=0)
# ### Important !
# As you can see in this kernel https://www.kaggle.com/anjum48/leakage-within-the-train-dataset
#
# As discussed in the discussion forums (https://www.kaggle.com/c/career-con-2019/discussion/87239#latest-508136) it looks as if each series is part of longer acquisition periods that have been cut up into chunks with 128 samples.
#
# This means that each series is not truly independent and there is leakage between them via the orientation data. Therefore, if you have any features that use orientation, you will get a very high CV score due to this leakage in the train set.
#
# [This kernel](https://www.kaggle.com/anjum48/leakage-within-the-train-dataset) will show you how it is possible to get a CV score of 0.992 using only the **orientation data**.
#
# ---
#
# **So I recommend not to use orientation information**
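# A minimal sketch of that recommendation (illustrative only: the prefixes below are an
# assumption about which engineered columns carry orientation information, and
# `data_no_orient` / `test_no_orient` are hypothetical names not used later in this kernel).

# In[ ]:

orient_prefixes = ('orientation', 'norm_', 'euler_', 'mod_quat', 'norm_quat', 'totl_xyz')
orient_cols = [c for c in data.columns if c.startswith(orient_prefixes)]
data_no_orient = data.drop(columns=orient_cols)
test_no_orient = test.drop(columns=orient_cols)
print("Dropped", len(orient_cols), "orientation-derived feature columns")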
# ## Correlations (Part II)
# In[57]:
#https://stackoverflow.com/questions/17778394/list-highest-correlation-pairs-from-a-large-correlation-matrix-in-pandas
corr_matrix = data.corr().abs()
raw_corr = data.corr()
sol = (corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))
.stack()
.sort_values(ascending=False))
top_corr = pd.DataFrame(sol).reset_index()
top_corr.columns = ["var1", "var2", "abs corr"]
# with .abs() we lost the sign, and it's very important.
for x in range(len(top_corr)):
var1 = top_corr.iloc[x]["var1"]
var2 = top_corr.iloc[x]["var2"]
corr = raw_corr[var1][var2]
top_corr.at[x, "raw corr"] = corr
# In[58]:
top_corr.head(15)
# ### Filling missing NAs and infinite data ∞ with zeros
# In[59]:
data.fillna(0,inplace=True)
test.fillna(0,inplace=True)
data.replace(-np.inf,0,inplace=True)
data.replace(np.inf,0,inplace=True)
test.replace(-np.inf,0,inplace=True)
test.replace(np.inf,0,inplace=True)
# ## Label encoding
# In[60]:
target.head()
# In[61]:
target['surface'] = le.fit_transform(target['surface'])
# In[62]:
target['surface'].value_counts()
# In[63]:
target.head()
# # Run Model
# **use random_state at Random Forest**
#
# if you don't use random_state you will get a different solution every time; sometimes you will be lucky, but other times you will waste your time comparing runs.
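# For example, a sketch of a reproducible variant of the classifier used below; fixing
# random_state (59 here simply mirrors the KFold seed) makes reruns comparable.
# `rf_reproducible` is a hypothetical name and is not used by the training loop.

# In[ ]:

rf_reproducible = RandomForestClassifier(n_estimators=500, n_jobs=-1, random_state=59)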
# **Validation Strategy: Stratified KFold**
# In[64]:
folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=59)
# In[65]:
predicted = np.zeros((test.shape[0],9))
measured= np.zeros((data.shape[0]))
score = 0
# In[66]:
for times, (trn_idx, val_idx) in enumerate(folds.split(data.values,target['surface'].values)):
model = RandomForestClassifier(n_estimators=500, n_jobs = -1)
#model = RandomForestClassifier(n_estimators=500, max_depth=10, min_samples_split=5, n_jobs=-1)
model.fit(data.iloc[trn_idx],target['surface'][trn_idx])
measured[val_idx] = model.predict(data.iloc[val_idx])
predicted += model.predict_proba(test)/folds.n_splits
score += model.score(data.iloc[val_idx],target['surface'][val_idx])
print("Fold: {} score: {}".format(times,model.score(data.iloc[val_idx],target['surface'][val_idx])))
importances = model.feature_importances_
indices = np.argsort(importances)
features = data.columns
if model.score(data.iloc[val_idx],target['surface'][val_idx]) > 0.92000:
hm = 30
plt.figure(figsize=(7, 10))
plt.title('Feature Importances')
plt.barh(range(len(indices[:hm])), importances[indices][:hm], color='b', align='center')
plt.yticks(range(len(indices[:hm])), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
gc.collect()
# In[67]:
print('Avg Accuracy RF', score / folds.n_splits)
# In[68]:
confusion_matrix(measured,target['surface'])
# ### Confusion Matrix Plot
# In[69]:
# https://www.kaggle.com/artgor/where-do-the-robots-drive
def plot_confusion_matrix(truth, pred, classes, normalize=False, title=''):
cm = confusion_matrix(truth, pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(10, 10))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix', size=15)
plt.colorbar(fraction=0.046, pad=0.04)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.grid(False)
plt.tight_layout()
# In[70]:
plot_confusion_matrix(target['surface'], measured, le.classes_)
# ### Submission (Part I)
# In[71]:
sub['surface'] = le.inverse_transform(predicted.argmax(axis=1))
sub.to_csv('submission.csv', index=False)
sub.head()
# ### Best Submission
# In[72]:
best_sub = pd.read_csv('../input/robots-best-submission/final_submission.csv')
best_sub.to_csv('best_submission.csv', index=False)
best_sub.head(10)
# ## References
#
# [1] https://www.kaggle.com/vanshjatana/help-humanity-by-helping-robots-4e306b
#
# [2] https://www.kaggle.com/artgor/where-do-the-robots-drive
#
# [3] https://www.kaggle.com/gpreda/robots-need-help
#
# [4] https://www.kaggle.com/vanshjatana/help-humanity-by-helping-robots-4e306b by [@vanshjatana](https://www.kaggle.com/vanshjatana)
# # ABOUT Submissions & Leaderboard
# This kernel [distribution hack](https://www.kaggle.com/donkeys/distribution-hack) by [@donkeys](https://www.kaggle.com/donkeys) simply produces 9 output files, one for each target category.
# I submitted each of these to the competition to see how much of each target type exists in the test set distribution. Results:
#
# - carpet 0.06
# - concrete 0.16
# - fine concrete 0.09
# - hard tiles 0.06
# - hard tiles large space 0.10
# - soft pvc 0.17
# - soft tiles 0.23
# - tiled 0.03
# - wood 0.06
#
# Also posted a discussion [thread](https://www.kaggle.com/c/career-con-2019/discussion/85204)
#
#
# **by [@ninoko](https://www.kaggle.com/ninoko)**
#
# I've probed the public leaderboard and this is what I got:
# There are far fewer surfaces like wood or tiled, and many more soft and hard tiles in the public leaderboard. This can be an issue, and explains why CV and LB results differ strangely.
#
# 
# **I will analyze my best submissions in order to find something interesting.**
#
# Please, feel free to optimize this code.
# In[73]:
sub073 = pd.read_csv('../input/robots-best-submission/mybest0.73.csv')
sub072 = pd.read_csv('../input/robots-best-submission/sub_0.72.csv')
sub072_2 = pd.read_csv('../input/robots-best-submission/sub_0.72_2.csv')
sub071 = pd.read_csv('../input/robots-best-submission/sub_0.71.csv')
"""
Author: <NAME>
"""
from .holidays import Holidays
from pandas import to_datetime, Timestamp, DatetimeIndex, date_range, \
DateOffset
from pandas.tseries.offsets import MonthEnd, YearEnd
from pandas.core.series import Series
from numpy import busday_count, busday_offset, busdaycalendar, asarray, \
broadcast, broadcast_arrays, ndarray, minimum, divmod, count_nonzero, \
datetime64
class DayCounts(object):
# Constants
WKMASK = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
BUS = 'BUS'
ACT = 'ACT'
SEP = '/'
NL_DC = ['nl/365']
OO_DC = ['1/1']
BUS_DC = ['bus/30', 'bus/252', 'bus/1', 'bus/bus']
ACT_DC = ['act/act isda', 'act/365', 'act/365a', 'act/365f',
'act/364', 'act/360', 'act/365l', 'act/act afb',
'act/act icma']
XX360_DC = ['30a/360', '30e/360', '30e+/360', '30e/360 isda', '30u/360']
# Properties
__dc = None
__cal = None
__adj = None
__adjo = None
__busc = None
def __init__(self, dc, adj=None, calendar=None,
weekmask='Mon Tue Wed Thu Fri', adjoffset=0):
"""
Day count constructor
Parameters
----------
dc : str
Valid day count convention, e.g. 'act/360', 'bus/252', 'nl/365'.
Currently supported values are listed via static method
`dc_domain`.
adj : None, 'following', 'preceding', 'modifiedfollowing',
'modifiedpreceding', default None
None denotes no adjustment. If specified, it determines how
dates that do not fall on valid date are treated. Assuming
`adjoffset` set to 0:
- 'following' denotes next valid date
- 'preceding' denotes previous valid date
- 'modifiedfollowing' ('modifiedpreceding') is the next
(previous) valid date unless it is across a month boundary,
in which case it takes the first valid date earlier (later) in
time
calendar : None, str
If specified, it must be the name of a calendar supported by the
Holidays factory class
weekmask : str or array-like of bool, default 'Mon Tue Wed Thu Fri'
From numpy.busday_offset: A seven-element array indicating which
of Monday through Sunday are valid days. May be specified as a
length-seven list or array, like [1,1,1,1,1,0,0]; a length-seven
string, like ‘1111100’; or a string like “Mon Tue Wed Thu Fri”,
made up of 3-character abbreviations for weekdays, optionally
separated by white space. Valid abbreviations are: Mon Tue Wed
Thu Fri Sat Sun
adjoffset : int, default 0
Scalar indicating the offset value that will be used if
adjustment rule is not set to None
Returns
-------
self : DayCounts
New instance of object
Notes
-----
(1) THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
(2) Builds on numpy.datetime64 and pandas.Timestamp. As a rule,
inputs of methods are any type/value that can be properly parsed
by pandas.to_datetime() without optional inputs. Several methods
from these packages are used.
"""
self.dc = dc
self.adj = adj
self.adjoffset = adjoffset
h = Holidays.holidays(cdr=calendar)
self.__busc = busdaycalendar(weekmask=weekmask, holidays=h)
self.calendar = calendar
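# A minimal usage sketch (illustrative; the convention and dates are arbitrary examples):
#
#   dc = DayCounts('bus/252', adj='following', calendar=None)
#   dc.days('2019-01-02', '2019-02-01')  # integer day count under the convention
#   dc.tf('2019-01-02', '2020-01-02')    # year fraction between the two dates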
def tf(self, d1, d2):
"""Calculates time fraction (in year fraction) between two dates given
day count convention"""
d1 = self.adjust(d1)
d2 = self.adjust(d2)
# Save adjustment state and set it to none, so we can safely use the
# days and dib functions of "date splits" we produce in for some
# day counts
state = self.adj
self.adj = None
if self.dc == 'ACT/ACT ICMA':
raise AttributeError('The time fraction function cannot be used '
'for the %s convention' % self.dc)
if not (self.dc == 'ACT/ACT ISDA' or self.dc == 'ACT/ACT AFB' or
self.dc == '1/1'):
yf = self.days(d1, d2) / self.dib(d1, d2)
elif self.dc == 'ACT/ACT ISDA':
# We could treat everything as an array, we leave the dual
# implementation because vectorizing is clumsy. So, we just
# mimic the interface
if isinstance(d1, Timestamp) and isinstance(d2, Timestamp):
# We place the assertion here to save some thought in the
# recursion (we check one by one or delegate)
assert d1 <= d2, 'First date must be smaller or equal to ' \
'second date'
if d1.year == d2.year:
yf = self.days(d1, d2) / self.dib(d1, d2)
else:
ey1 = to_datetime(str(d1.year)+'-12-31')
ey2 = to_datetime(str(d2.year-1)+'-12-31')
yf = (d2.year-d1.year-1) + \
(self.days(d1, ey1) / self.dib(d1, d1)) + \
(self.days(ey2, d2) / self.dib(d2, d2))
else: # This is the dreaded vectorized case that, for now,
# will be dealt by simulating the interface
result = list()
f = result.append
for t1, t2 in broadcast(d1, d2):
f(self.tf(t1, t2))
yf = asarray(result, dtype='float64')
elif self.dc == '1/1':
# See notes in the ACT/ACT sections about vectorization
if isinstance(d1, Timestamp) and isinstance(d2, Timestamp):
# We place the assertion here to save some thought in the
# recursion (we check one by one or delegate)
assert d1 <= d2, 'First date must be smaller or equal to ' \
'second date'
if (d1.day == d2.day and d1.month == d2.month) \
or (d1.month == 2 and d2.month == 2 and
d1.day in [28, 29] and d2.day in [28, 29]):
yf = int(0.5 + self.days(d1, d2) / self.dib(d1, d2))
else:
# This is the same as ACT/ACT. We tweak the DC and bring
# it back. This is computationally costly (as a parsing
# of the day count is involved at each step), but safer
# from an implementation perspective.
self.dc = 'act/act isda'
yf = self.tf(d1, d2)
self.dc = '1/1'
else: # This is the dreaded vectorized case that, for now,
                # will be dealt with by simulating the interface
result = list()
f = result.append
for t1, t2 in broadcast(d1, d2):
f(self.tf(t1, t2))
yf = asarray(result, dtype='float64')
elif self.dc == 'ACT/ACT AFB':
if isinstance(d1, Timestamp) and isinstance(d2, Timestamp):
# We place the assertion here to save some thought in the
# recursion (we check one by one or delegate)
assert d1 <= d2, 'First date must be smaller or equal to ' \
'second date'
                # We need to loop back from d2, counting the number of
                # years we can subtract until we close the interval. Note that
                # every time we fall on a Feb 29th, a year offset will land
                # us on Feb 28th. In these cases, we need to add the missing
                # day fraction (1/366). Note that we add it only once,
                # and not the number of leap days in the interval divided by
                # 366. Why? While the documents are not super clear about
                # this, it seems reasonable to infer it from the "counting
                # back" rule, where we are always subtracting entire years.
#
# 2004-02-28 to 2008-02-27 = 3 + 365/366
# 2004-02-28 to 2008-02-28 = 4
# 2004-02-28 to 2008-02-29 = 4 + 1/366
# 2004-02-28 to 2012-02-28 = 8
# 2004-02-28 to 2012-02-29 = 8 + 1/366 (and NOT 2/366)
n = 0
offset = 0
while d2 - DateOffset(years=1) >= d1:
if d2.day == 29 and d2.month == 2:
offset += 1/366
n += 1
d2 = d2 - DateOffset(years=1)
yf = n + offset + (self.days(d1, d2) / self.dib(d1, d2))
else: # This is the dreaded vectorized case that, for now,
                # will be dealt with by simulating the interface
result = list()
f = result.append
for t1, t2 in broadcast(d1, d2):
f(self.tf(t1, t2))
yf = asarray(result, dtype='float64')
else:
raise NotImplementedError('Day count %s not supported' % self.dc)
# Return state
self.adj = state
return yf
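    # Worked example of the ACT/ACT ISDA split computed above (a sketch,
    # assuming no business-day adjustment is active):
    #   d1 = 2016-11-01, d2 = 2018-02-01
    #   whole calendar years in between: 2018 - 2016 - 1 = 1
    #   stub in 2016: days(2016-11-01, 2016-12-31) / 366 = 60/366  (leap year)
    #   stub in 2018: days(2017-12-31, 2018-02-01) / 365 = 32/365
    #   yf = 1 + 60/366 + 32/365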
def days(self, d1, d2):
"""Number of days (integer) between two dates given day count
convention"""
d1 = self.adjust(d1)
d2 = self.adjust(d2)
        # All business-day conventions are handled the same way and dealt with at once
bus_dc = [x.upper() for x in self.BUS_DC]
if self.dc in bus_dc:
if not isinstance(d1, Timestamp):
d1 = d1.values.astype('datetime64[D]')
else:
d1 = datetime64(d1).astype('datetime64[D]')
if not isinstance(d2, Timestamp):
d2 = d2.values.astype('datetime64[D]')
else:
d2 = datetime64(d2).astype('datetime64[D]')
return busday_count(d1, d2, busdaycal=self.buscore)
# Deal with the 30/360 like conventions
if self.dc == '30U/360':
y1, m1, d1, y2, m2, d2 = self._date_parser(d1, d2)
# Because the broadcasting occurred at parsing, everything is an
# array
# Adjustments (done in the following order)
# (i) If d2 is the last day of Feb, we change it to 30 only
            #     if d1 is the last day of Feb
mask2 = (self.isleap(y2) & (d2 == 29) & (m2 == 2)) | \
(~self.isleap(y2) & (d2 == 28) & (m2 == 2))
mask1 = (self.isleap(y1) & (d1 == 29) & (m1 == 2)) | \
(~self.isleap(y1) & (d1 == 28) & (m1 == 2))
mask = mask1 & mask2
d2[mask] = 30
# (ii) If d1 is the last day of Feb, change it to 30
d1[mask1] = 30
# (iii) If d2 is 31, change it to 30 only if d1 (after ii) is 30
# or 31
mask2 = d2 == 31
mask1 = (d1 == 30) | (d1 == 31)
mask = mask1 & mask2
d2[mask] = 30
# (iv) If d1 is 31, change it to 30
mask = d1 == 31
d1[mask] = 30
# Call core function
days = self._days_30360_core(y1, m1, d1, y2, m2, d2)
if len(days) == 1:
return days[0]
else:
return days
elif self.dc == '30A/360':
y1, m1, d1, y2, m2, d2 = self._date_parser(d1, d2)
# Adjustments (done in the following order)
# (i) D1 = min(D1, 30)
d1 = minimum(d1, 30)
# (ii) If, after adjustment, d1 = 30, then d2 = min(d2, 30)
mask = d1 == 30
d2[mask] = minimum(d2[mask], 30)
days = self._days_30360_core(y1, m1, d1, y2, m2, d2)
if len(days) == 1:
return days[0]
else:
return days
elif self.dc == '30E/360':
y1, m1, d1, y2, m2, d2 = self._date_parser(d1, d2)
# No conditional adjustments in this case
d1 = minimum(d1, 30)
d2 = minimum(d2, 30)
days = self._days_30360_core(y1, m1, d1, y2, m2, d2)
if len(days) == 1:
return days[0]
else:
return days
elif self.dc == '30E/360 ISDA':
y1, m1, d1, y2, m2, d2 = self._date_parser(d1, d2)
# Adjustments:
# (i) if d1 is EOM, set d1 to 30
mask1 = self._eom_mask(y1, m1, d1)
d1[mask1] = 30
# (ii) if d2 is EOM, set d2 to 30
mask2 = self._eom_mask(y2, m2, d2)
d2[mask2] = 30
# Call core function
days = self._days_30360_core(y1, m1, d1, y2, m2, d2)
if len(days) == 1:
return days[0]
else:
return days
elif self.dc == '30E+/360':
y1, m1, d1, y2, m2, d2 = self._date_parser(d1, d2)
# Adjustments:
# (i) if d1 is 31, set d1 to 30
d1 = minimum(d1, 30)
# (ii) if d2 = 31, set date to first day of next month
mask = d2 == 31
d2[mask] = 1
            # roll the month forward, carrying into the year when December wraps
            i, r = divmod(m2[mask], 12)
            m2[mask] = r + 1
            y2[mask] = y2[mask] + i
# Call core function
days = self._days_30360_core(y1, m1, d1, y2, m2, d2)
if len(days) == 1:
return days[0]
else:
return days
# Deal with actual conventions
if self.dc in ['ACT/ACT ISDA', 'ACT/365', 'ACT/365A', 'ACT/365F',
'ACT/364', 'ACT/360', 'ACT/365L', 'ACT/ACT AFB',
'ACT/ACT ICMA']:
return self.daysnodc(d1, d2)
elif self.dc == 'NL/365':
return self.daysnodc(d1, d2) - self.leapdays(d1, d2)
# Deal with the bizarre 1/1 convention
if self.dc == '1/1':
return self.daysnodc(d1, d2)
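    # Worked 30/360-style example for the adjustments above (a sketch, assuming
    # the usual core formula 360*(y2 - y1) + 30*(m2 - m1) + (d2 - d1)):
    #   30U/360 with d1 = 2020-01-31 and d2 = 2020-07-31
    #   rule (iii): d2 = 31 with d1 in {30, 31}  ->  d2 = 30
    #   rule (iv):  d1 = 31                      ->  d1 = 30
    #   days = 360*(2020 - 2020) + 30*(7 - 1) + (30 - 30) = 180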
def adjust(self, d):
"""Apply adjustment (following, preceding etc) to date d or array
Note that we return either a Timestamp or a DatetimeIndex so that
methods to come may use properties such as year or month on the array
"""
if self.adj is None:
return | to_datetime(d) | pandas.to_datetime |
import matplotlib.pyplot as plt
import seaborn as sb
from seaborn import lineplot
import pandas as pd
import json
import random
#dependency; REFACTOR: could be replaced by a more efficient plotting library
from drawnow import drawnow
import time
#multiple processes (via multiprocessing)
from multiprocessing import Process
class Graph():
def __init__(self,*args):
self.internaldata = []
global graph_counter
graph_counter = 0
sb.set_style(style="darkgrid")
sb.set(font="Gentium")
sb.color_palette("crest", as_cmap=True)
for t in args:
if(t == 'light' or t == 'Light'):
# scientific light
textColor:str = '#252223'
backgroundColor:str = '#EAEAF2'
gridColor:str = '#FFFFFF'
gridOuter:str = '#EAEAF2'
elif(t == 'dark' or t == 'Dark'):
textColor:str = '#BFBAB0'
backgroundColor:str = '#1F2430'
gridColor:str = '#6F6F6F'
gridOuter:str = '#1F2430'
sb.set(rc={'axes.facecolor':backgroundColor,
'figure.facecolor':backgroundColor,
'grid.color':gridColor,
'text.color':textColor,
'axes.labelcolor':textColor,
'axes.edgecolor':gridOuter,
'xtick.color':textColor,
'ytick.color':textColor})
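    # Minimal usage sketch (assumed workflow; adddata() expects each argument
    # to be something pandas.DataFrame accepts and, after the first call, to
    # carry a 'y' column):
    #
    #   g = Graph('dark')
    #   g.adddata({'x': [0, 1, 2], 'y': [3.1, 2.7, 4.0]})  # initializes the base frame
    #   g.adddata({'x': [0, 1, 2], 'y': [1.0, 1.5, 0.8]})  # appended as column 'y_1'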
    #self is needed so adddata can be called as a method on the Graph instance
def adddata(self,*args):
global graph_counter
for g in args:
if graph_counter == 0:
graph_counter = graph_counter + 1
                # the first call initializes the base DataFrame
graphname = pd.DataFrame(g)
print("Graph # {} initialized".format(graph_counter))
else:
                #append the new series as an extra column once the
                #first graph has been initialized
graph_2 = pd.DataFrame(g)
graphname['y_'+str(graph_counter)] = | pd.Series(graph_2['y']) | pandas.Series |
import argparse, logging
import numpy as np
import networkx as nx
import node2vec
import node2vec_stack
import graph
import construct_from_data
import scipy.io
import pandas as pd
import pickle
import sys
import os
import tensorflow as tf
import keras
import warnings
import pdb
import requests
import datetime
import re
import matplotlib.pyplot as plt
import utils_data
import extract_team_GAT
import spektral
import tensorboard
from tensorflow.keras import backend as K
from tensorflow.keras import layers, initializers
from keras.engine.topology import Layer
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Input, Lambda, Concatenate, Dropout, ReLU, Reshape,Flatten
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import plot_model
from datetime import date,timedelta
np.set_printoptions(threshold=sys.maxsize)
#Diffusion Convolution Network:
#<NAME>, Diffusion Convolutional Neural Network, November 15, 2015
#arXiv:1511.02136v6 [cs.LG]
#Convolution operations performed when constructing inputs in ncaabwalkod_train() and ncaabwalkod_test()
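#A sketch of that input construction (assumed shapes; it mirrors the
#S_OffDef_stack built in main() below): powers of the transition matrix are
#stacked so each team sees statistics diffused over walks of length 1..height
#before being combined with the node2vec features.
#
#   S_stack = np.zeros((2*(N+1), 2*(N+1), height))
#   for j in range(height):
#       S_stack[:, :, j] = np.linalg.matrix_power(S_OffDef, j + 1)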
def DCNN_ncaabwalkod(height,node2vec_dim,N):
inputs = Input(shape=(2,2*height*node2vec_dim,))
last_5_input = Input(shape = (10,))
one_hot_input = Input(shape=(2*N,))
branch_outputs = []
dense1 = Dense(int(np.floor(2*node2vec_dim*height)),activation = 'tanh')
drop1 = Dropout(.2)
dense2 = Dense(int(np.floor(height*node2vec_dim)))
drop2 = Dropout(.1)
dense3 = Dense(int(np.floor(height*node2vec_dim/5)))
for i in range(2):
ha = Lambda(lambda x: x[:, i, :], name = "Lambda_" + str(i))(inputs)
offdef = dense1(ha)
offdef = drop1(offdef)
offdef = dense2(offdef)
offdef = drop2(offdef)
fin = dense3(offdef)
pts = Dense(1)(fin)
branch_outputs.append(pts)
prediction = Concatenate()(branch_outputs)
model = Model(inputs = [inputs,last_5_input,one_hot_input], outputs = prediction)
return model
#General Graph Neural Network
#<NAME>. 2020. Design Space for Graph Neural Networks. NeurIPS 2020
#arXiv:2011.08843v1
#implemented with spektral: https://github.com/danielegrattarola/spektral
def ncaab_gen(node2vec_dim,N):
channels = 50
node2vec_input = Input(shape=(2*(N+1),node2vec_dim))
A_input = Input(shape=(2*(N+1),2*(N+1)))
A_input_sp = extract_team_GAT.To_Sparse()(A_input)
team_inputs = Input(shape=(2,),dtype = tf.int64)
team_inputs_AH = Input(shape=(2,),dtype = tf.int64)
last_5_input = Input(shape = (10,))
one_hot_input = Input(shape=(2*N,))
one_hot_input_AH = Input(shape=(2*N,))
conv = spektral.layers.GeneralConv(channels= channels, batch_norm=True, dropout=0.0, aggregate='sum', activation='relu', use_bias=True,
kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None,
bias_regularizer=None, activity_regularizer=None,
kernel_constraint=None, bias_constraint=None)([node2vec_input,A_input_sp])
#extracts nodes for link prediction
game_vec_HA = extract_team_GAT.Game_Vec(channels,N)([team_inputs,conv])
game_vec_AH = extract_team_GAT.Game_Vec(channels,N)([team_inputs_AH,conv])
HA = Reshape((int(np.floor(2*channels)),))(game_vec_HA)
AH = Reshape((int(np.floor(2*channels)),))(game_vec_AH)
HA = Concatenate()([HA,one_hot_input])
AH = Concatenate()([AH,one_hot_input_AH])
dense1 = Dense(int(np.floor(2.5*channels)),activation = 'tanh')
drop1 = Dropout(.01)
dense2 = Dense(int(np.floor(channels)),activation = 'tanh')
drop2 = Dropout(.01)
pred = Dense(1)
HA = dense1(HA)
HA = drop1(HA)
HA = dense2(HA)
HA = drop2(HA)
AH = dense1(AH)
AH = drop1(AH)
AH = dense2(AH)
AH = drop2(AH)
pts_home = pred(HA)
pts_away = pred(AH)
prediction = Concatenate()([pts_home,pts_away])
model = Model(inputs = [team_inputs,team_inputs_AH,node2vec_input,A_input,last_5_input,one_hot_input,one_hot_input_AH], outputs = prediction)
return model
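#The Game_Vec layer imported from extract_team_GAT (not shown in this file) is
#assumed to pull out the two node embeddings of the teams playing a given game;
#a rough, hypothetical equivalent in plain TensorFlow would be:
#
#   def game_vec(team_idx, node_emb):
#       # node_emb: (batch, 2*(N+1), channels); team_idx: (batch, 2) node ids
#       return tf.gather(node_emb, team_idx, axis=1, batch_dims=1)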
#ARMA model
#<NAME>, <NAME>, <NAME>, <NAME>
#Graph Neural Networks with convolutional ARMA filters, January 15, 2019
#arXiv:1901.01343v7 [cs.LG]
#implemented with spektral: https://github.com/danielegrattarola/spektral
def ncaab_ARMA(node2vec_dim,N):
channels = 40
node2vec_input = Input(shape=(2*(N+1),node2vec_dim))
A_input = Input(shape=(2*(N+1),2*(N+1)))
A_input_sp = extract_team_GAT.To_Sparse()(A_input)
team_inputs = Input(shape=(2,),dtype = tf.int64)
team_inputs_AH = Input(shape=(2,),dtype = tf.int64)
last_5_input = Input(shape = (10,))
one_hot_input = Input(shape=(2*N,))
one_hot_input_AH = Input(shape=(2*N,))
ARMA = spektral.layers.ARMAConv(channels, order=4, iterations=1, share_weights=True, gcn_activation='relu',
dropout_rate=0.2, activation='relu', use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros',
kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
kernel_constraint=None, bias_constraint=None)([node2vec_input,A_input])
#extracts nodes for link prediction
game_vec_HA = extract_team_GAT.Game_Vec(channels,N)([team_inputs,ARMA])
game_vec_AH = extract_team_GAT.Game_Vec(channels,N)([team_inputs_AH,ARMA])
HA = Reshape((int(np.floor(2*channels)),))(game_vec_HA)
AH = Reshape((int(np.floor(2*channels)),))(game_vec_AH)
HA = Concatenate()([HA,one_hot_input])
AH = Concatenate()([AH,one_hot_input_AH])
dense1 = Dense(int(np.floor(2.5*channels)),activation = 'tanh')
drop1 = Dropout(.01)
dense2 = Dense(int(np.floor(channels)),activation = 'tanh')
drop2 = Dropout(.01)
pred = Dense(1)
HA = dense1(HA)
HA = drop1(HA)
HA = dense2(HA)
HA = drop2(HA)
AH = dense1(AH)
AH = drop1(AH)
AH = dense2(AH)
AH = drop2(AH)
pts_home = pred(HA)
pts_away = pred(AH)
prediction = Concatenate()([pts_home,pts_away])
model = Model(inputs = [team_inputs,team_inputs_AH,node2vec_input,A_input,last_5_input,one_hot_input,one_hot_input_AH], outputs = prediction)
return model
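#Unlike ncaab_gen, the ARMA model is fed a preprocessed operator rather than
#the raw adjacency matrix: main() below passes the rescaled normalized
#Laplacian built with spektral's utilities:
#
#   L = spektral.utils.convolution.normalized_laplacian(A_OffDef)
#   L = spektral.utils.convolution.rescale_laplacian(L)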
def ncaab_gin(node2vec_dim,N):
channels = 50
node2vec_input = Input(shape=(2*(N+1),node2vec_dim))
A_input = Input(shape=(2*(N+1),2*(N+1)))
team_inputs = Input(shape=(2,),dtype = tf.int64)
team_inputs_AH = Input(shape=(2,),dtype = tf.int64)
last_5_input = Input(shape = (10,))
one_hot_input = Input(shape=(2*N,))
one_hot_input_AH = Input(shape=(2*N,))
A_input_sp = extract_team_GAT.To_Sparse()(A_input)
GIN = spektral.layers.GINConv(channels, epsilon=None, mlp_hidden=[channels, channels], mlp_activation='elu', aggregate='sum', activation= 'relu',
use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros', kernel_regularizer=None,
bias_regularizer=None, activity_regularizer=None, kernel_constraint=None,
bias_constraint=None)([node2vec_input,A_input_sp])
game_vec_HA = extract_team_GAT.Game_Vec(channels,N)([team_inputs,GIN])
game_vec_AH = extract_team_GAT.Game_Vec(channels,N)([team_inputs_AH,GIN])
HA = Reshape((int(np.floor(2*channels)),))(game_vec_HA)
AH = Reshape((int(np.floor(2*channels)),))(game_vec_AH)
HA = Concatenate()([HA,one_hot_input])
AH = Concatenate()([AH,one_hot_input_AH])
dense1 = Dense(int(np.floor(2.5*channels)),activation = 'tanh')
drop1 = Dropout(.3)
dense2 = Dense(int(np.floor(channels)),activation = 'tanh')
drop2 = Dropout(.05)
pred = Dense(1)
HA = dense1(HA)
HA = drop1(HA)
HA = dense2(HA)
HA = drop2(HA)
AH = dense1(AH)
AH = drop1(AH)
AH = dense2(AH)
AH = drop2(AH)
pts_home = pred(HA)
pts_away = pred(AH)
prediction = Concatenate()([pts_home,pts_away])
model = Model(inputs = [team_inputs,team_inputs_AH,node2vec_input,A_input,last_5_input,one_hot_input,one_hot_input_AH], outputs = prediction)
return model
def main():
#select model type and year
#model_type = 'ncaabwalkod'
#model_type = 'ncaab_gen'
model_type = 'ncaab_ARMA'
#model_type = 'ncaab_gin'
year = 2021
#select day range on which to test the model
startdate = datetime.datetime(year,3,28)
stopdate = datetime.datetime(year,3,29)
#plots = 'on'
plots = 'off'
start_day = (startdate-datetime.datetime(year-1,10,12)).days
stop_day = (stopdate-datetime.datetime(year-1,10,12)).days
startstring = startdate.strftime("%m_%d_%Y")
stopstring = stopdate.strftime("%m_%d_%Y")
now = datetime.datetime.now()
datestring = now.strftime("%m_%d_%Y")
today = (now-datetime.datetime(year-1,10,12)).days
TeamList = pd.read_excel('data/TeamLists.xls',sheet_name = year-2015,header = None)
TeamList = TeamList.to_numpy(dtype = object,copy = True)
#edgeweights when constructing the Offense and Defense Statistic graphs SOffDef, G_orc
weights = pd.read_excel('data/weights.xls',sheet_name = 0,header = 0)
weights = weights.to_numpy(dtype = object,copy = True)
tourney_games = pd.read_excel('data/tourney_games8.xls',sheet_name = 0,header = 0)
tourney_games = tourney_games.to_numpy(dtype = object,copy = True)
with open('pickles/ncaabdata_pickled/'+str(year)+'ncaabdata.pkl', 'rb') as Data:
Data_Full = pickle.load(Data)
N = Data_Full.shape[2]
for i in range(364):
for j in range(36):
for k in range(N):
if Data_Full[i,j,k] is None:
Data_Full[i,j,k] = 0
schedule, HomeAway = utils_data.format_schedule(Data_Full,TeamList,year)
#if year < 2021:
# Lines = utils_data.Lines(Data_Full,schedule,HomeAway,TeamList_Lines,year)
#if year == 2021:
# Lines = utils_data.Lines_2021(Data_Full,schedule,HomeAway,TeamList_Lines,year)
Lines = np.zeros((N,364),dtype = float)
ats_bets = 0
ats_wins = 0
total_bets = 0
total_wins = 0
money_line_wins = 0
moneyline_count = 0
    window = 0  #parameter to constrain the test set to games where the model prediction and the Vegas prediction differ by more than 'window'
push = 0
ties = 0
loss = 0
runs = 0
test_games_all = np.zeros((5000,9),dtype = object)
test_count = 0
#For each day a game occurs, the model constructs a training and validation set using all games played previously in the season
    #The model is tested on games occurring on the current day
for day in range(start_day,stop_day):
if np.sum(schedule[:,day+1]) != -1*N:
runs = runs + 1
#Construct S_oracle and Pts
#Graph constructed according to:
#Balreira, Miceli, Tegtmeyer, An Oracle method to predict NFL games,
#http://ramanujan.math.trinity.edu/bmiceli/research/NFLRankings_revised_print.pdf
#using data from https://github.com/roclark/sportsipy
S_OffDef, A_OffDef = construct_from_data.construct_S_orc(Data_Full,schedule,HomeAway,weights,day)
#Vegas_Graph = construct_from_data.Vegas_Graph(schedule,Lines,day)
A_Veg = A_OffDef[0:N+1,N+1:2*(N+1)]
ARMA = spektral.utils.convolution.normalized_laplacian(A_OffDef)
ARMA = spektral.utils.convolution.rescale_laplacian(ARMA)
#ARMA_Veg = spektral.utils.convolution.normalized_laplacian(A_Veg)
#ARMA_Veg = spektral.utils.convolution.rescale_laplacian(ARMA_Veg)
            epsilon = .001  #damping hyperparameter for the PageRank step
#hyperparameters for node2vec
#Grover, Leskovec, node2vec: Scalable Feature Learning for Networks, July 3, 2016 #arXiv:1607.00653v1 [cs.SI]
node2vec_dim = 40
node2vec_p = 1
node2vec_q = 1
height = 8
n2v_walklen = 10
n2v_numwalks = 20
n2v_wsize = 8
n2v_iter = 1
n2v_workers = 8
if model_type == 'ncaabwalkod' or model_type == 'ncaab_gen' or model_type == 'ncaab_ARMA' or model_type == 'ncaab_gin':
                G_orc = (1-epsilon)*(S_OffDef) + epsilon*(1/(2*(N+1)))*np.ones((2*(N+1),2*(N+1)),dtype = float)
G_orc = utils_data.sto_mat(G_orc)
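                #The two lines above form a PageRank-style "Google matrix",
                #G = (1 - epsilon)*S + epsilon*J/(2*(N+1)) with J the all-ones
                #matrix; sto_mat presumably re-normalizes the rows so G stays
                #row-stochastic before the offense/defense PageRank scores are
                #computed.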
PageRank_Off, PageRank_Def = construct_from_data.PageRank(G_orc,TeamList)
args_N = node2vec_stack.node2vec_input(S_OffDef,'emb/ncaab'+str(year)+'node2vec_OffDef.txt',node2vec_dim,n2v_walklen,
n2v_numwalks,n2v_wsize,n2v_iter,n2v_workers,node2vec_p,node2vec_q,True,True,False,False)
featurevecs = node2vec_stack.feat_N(args_N)
feature_node2vec = np.zeros((2*(N+1),node2vec_dim),dtype = float)
for j in range(2*(N+1)):
feature_node2vec[j,:] = featurevecs[str(j)]
#if plots == 'on':
# utils_data.plot_node2vec(feature_node2vec_Veg,TeamList_Lines,PageRank_Off,PageRank_Def,Vegas_Graph)
if model_type == 'ncaabwalkod':
S_OffDef_stack = np.zeros((2*(N+1),2*(N+1),height),dtype = float)
for j in range(height):
S_OffDef_stack[:,:,j] = np.linalg.matrix_power(S_OffDef,j+1)
x_train, y_train, last_5_train, one_hot_train = construct_from_data.Training_Set_ncaabwalkod(Data_Full,Lines,schedule,HomeAway,day,
S_OffDef_stack,feature_node2vec,height,node2vec_dim)
elif model_type == 'ncaab_gen':
x_train,x_train_AH,y_train,feature_train,A_Train,last_5_train,one_hot_train,one_hot_train_AH = construct_from_data.GAT_training_set(Data_Full,
Lines,schedule,HomeAway,
day,feature_node2vec,
A_OffDef)
elif model_type == 'ncaab_ARMA':
x_train,x_train_AH,y_train,feature_train,A_Train,last_5_train,one_hot_train,one_hot_train_AH = construct_from_data.GAT_training_set(Data_Full,
Lines,schedule,HomeAway,
day,feature_node2vec,
ARMA)
elif model_type == 'ncaab_gin':
x_train,x_train_AH,y_train,feature_train,A_Train, last_5_train,one_hot_train,one_hot_train_AH = construct_from_data.GAT_training_set(Data_Full,
Lines,schedule,HomeAway,
day,feature_node2vec,
A_OffDef)
call_backs = EarlyStopping(monitor='val_loss', min_delta=0, patience=150, verbose=1, restore_best_weights= False)
#Train the model on all previous games
#opt = SGD(lr = .001)
opt = Adam(learning_rate=0.001)
if model_type == 'ncaabwalkod':
model = DCNN_ncaabwalkod(height,node2vec_dim,N)
model.compile(loss='mean_squared_error', optimizer= opt, metrics=['accuracy'])
model.fit([x_train,last_5_train,one_hot_train],y_train,
epochs = 10, batch_size = 15, validation_split = 0.05,callbacks = [call_backs])
model.summary()
elif model_type == 'ncaab_gen':
model = ncaab_gen(node2vec_dim,N)
model.compile(loss='mean_squared_error', optimizer= opt, metrics=['accuracy'])
model.fit([x_train,x_train_AH,feature_train,A_Train,last_5_train,one_hot_train,one_hot_train_AH],y_train,
epochs = 3,batch_size = 1,validation_split = 0.05,callbacks = [call_backs])
model.summary()
elif model_type == 'ncaab_ARMA':
model = ncaab_ARMA(node2vec_dim,N)
model.compile(loss='mean_squared_error', optimizer= opt, metrics=['accuracy'])
model.fit([x_train,x_train_AH,feature_train,A_Train,last_5_train,one_hot_train,one_hot_train_AH],y_train,
epochs = 3,batch_size = 1,validation_split = 0.05,callbacks = [call_backs])
model.summary()
elif model_type == 'ncaab_gin':
model = ncaab_gin(node2vec_dim,N)
model.compile(loss='mean_squared_error', optimizer= opt, metrics=['accuracy'])
model.fit([x_train,x_train_AH,feature_train,A_Train,last_5_train,one_hot_train,one_hot_train_AH],y_train,
epochs = 3,batch_size = 1,validation_split = 0.05,callbacks = [call_backs])
model.summary()
games, gameteams, testgamecount = construct_from_data.Test_Games(TeamList,Data_Full,schedule,HomeAway,Lines,day)
if model_type == 'ncaabwalkod':
x_test, last_5_test, test_y,one_hot_test = construct_from_data.Test_Set_ncaabwalkod(Data_Full,games,testgamecount,S_OffDef_stack,
feature_node2vec,height,node2vec_dim,day,year)
Pred = model.predict([x_test,last_5_test,one_hot_test])
x_test_trn, last_5_test_trn,one_hot_test_trn = construct_from_data.tourney_set_ncaabwalkod(TeamList,S_OffDef_stack,feature_node2vec,height,node2vec_dim)
Pred_trn = model.predict([x_test_trn,last_5_test_trn,one_hot_test_trn])
if year < 2021:
Eval = model.evaluate([x_test,last_5_test,one_hot_test],test_y,verbose=0)
loss = loss + Eval[0]
#test the model, print predictions, the ATS Win %, ML Win % and the MSE for the test set
elif model_type == 'ncaab_gen':
x_test,x_test_AH,feature_test,A_test,last_5_test,test_y,one_hot_test,one_hot_test_AH = construct_from_data.GAT_test_set(Data_Full,games,
testgamecount,feature_node2vec,
A_OffDef,day,year)
Pred = model.predict([x_test,x_test_AH,feature_test,A_test,last_5_test,one_hot_test,one_hot_test_AH],batch_size=1)
x_test_trn,x_test_trn_AH,feature_test_trn,A_Test,last_5_test_trn,one_hot_test_trn,one_hot_test_trn_AH = construct_from_data.tourney_set_GAT(TeamList,
feature_node2vec,A_OffDef)
Pred_trn = model.predict([x_test_trn,x_test_trn_AH,feature_test_trn,A_Test,last_5_test_trn,one_hot_test_trn,one_hot_test_trn_AH],batch_size=1)
if year < 2021:
Eval = model.evaluate([x_test,x_test_AH,feature_test,A_test,last_5_test,one_hot_test,one_hot_test_AH],test_y,verbose=0,batch_size=1)
loss = loss + Eval[0]
elif model_type == 'ncaab_ARMA':
x_test,x_test_AH,feature_test,A_test,last_5_test, test_y,one_hot_test,one_hot_test_AH = construct_from_data.GAT_test_set(Data_Full,games,
testgamecount,feature_node2vec,
ARMA,day,year)
Pred = model.predict([x_test,x_test_AH,feature_test,A_test,last_5_test,one_hot_test,one_hot_test_AH],batch_size=1)
x_test_trn,x_test_trn_AH,feature_test_trn,A_Test,last_5_test_trn,one_hot_test_trn,one_hot_test_trn_AH = construct_from_data.tourney_set_GAT(TeamList,
feature_node2vec,A_OffDef)
Pred_trn = model.predict([x_test_trn,x_test_trn_AH,feature_test_trn,A_Test,last_5_test_trn,one_hot_test_trn,one_hot_test_trn_AH],batch_size=1)
if year < 2021:
Eval = model.evaluate([x_test,x_test_AH,feature_test,A_test,last_5_test,one_hot_test,one_hot_test_AH],test_y,verbose=0,batch_size=1)
loss = loss + Eval[0]
elif model_type == 'ncaab_gin':
x_test,x_test_AH,feature_test,A_test,last_5_test, test_y,one_hot_test,one_hot_test_AH = construct_from_data.gin_test_set(Data_Full,games,
testgamecount,feature_node2vec,
A_OffDef,day,year)
Pred = model.predict([x_test,x_test_AH,feature_test,A_test,last_5_test,one_hot_test,one_hot_test_AH],batch_size=1)
x_test_trn,x_test_trn_AH,feature_test,A_Test,last_5_test_trn,one_hot_test_trn,one_hot_test_trn_AH = construct_from_data.tourney_set_gin(TeamList,
feature_node2vec,A_OffDef)
Pred_trn = model.predict([x_test_trn,x_test_trn_AH,feature_test,A_Test,last_5_test_trn,one_hot_test_trn,one_hot_test_trn_AH],batch_size=1)
if year < 2021:
Eval = model.evaluate([x_test,x_test_AH,feature_test,A_test,last_5_test,one_hot_test,one_hot_test_AH],test_y,verbose=0,batch_size=1)
loss = loss + Eval[0]
results= np.round(Pred,decimals = 1)
results_trn = np.round(Pred_trn,decimals = 1)
games = np.concatenate((games,Pred),axis = 1)
tourney_games = np.concatenate((tourney_games,results_trn),axis = 1)
test_count = test_count + games.shape[0]
test_games_all[(test_count - games.shape[0]):test_count,:] = games
gameteams = np.concatenate((gameteams,results),axis = 1)
df = | pd.DataFrame(gameteams) | pandas.DataFrame |
from PySDDP.dessem.script.templates.dadger import DadgerTemplate
import pandas as pd
import os
from typing import IO
COMENTARIO = '&'
class Dadger(DadgerTemplate):
"""
    Class that contains all the elements common to any version of the Dessem Entdados file.
    Its purpose is to provide duck typing for the Dessem class and to add a level of specialization
    inside the factory. It also passes on the responsibility for implementing the read and
    write methods.
"""
def __init__(self):
super().__init__()
self.comentarios = list()
self.tm = dict()
self.sist = dict()
self.ree = dict()
self.uh = dict()
self.tviag = dict()
self.ut = dict()
self.usie = dict()
self.dp = dict()
self.de = dict()
self.cd = dict()
self.ri = dict()
self.ia = dict()
self.rd = dict()
self.rivar = dict()
self.it = dict()
self.gp = dict()
self.ni = dict()
self.ve = dict()
self.ci_ce = dict()
self.re = dict()
self.lu = dict()
self.fh = dict()
self.ft = dict()
self.fi = dict()
self.fe = dict()
self.fr = dict()
self.fc = dict()
self.ac = dict()
self.da = dict()
self.fp = dict()
self.ez = dict()
self.ag = dict()
self.mh = dict()
self.mt = dict()
self.tx = dict()
self.pq = dict()
self.secr = dict()
self.cr = dict()
self.r11 = dict()
self.vr = dict()
self.pd = dict()
self.vm = dict()
self.df = dict()
self.me = dict()
self.meta_cjsist = dict()
self.meta_sist = dict()
self.meta_usit = dict()
self.sh = dict()
self.tf = dict()
self.rs = dict()
self.sp = dict()
self.ps = dict()
self.pp = dict()
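    # Minimal usage sketch (assumed workflow; the file name below is only a
    # hypothetical example of a DESSEM Entdados-style input):
    #
    #   dadger = Dadger()
    #   dadger.ler('entdados.dat')      # parse the fixed-width records
    #   dadger.bloco_uh['df']           # UH records as a pandas DataFrame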
def ler(self, file_name: str) -> None:
self.entdados = list()
# listas referentes ao dicionário TM
self.tm['mne'] = list()
self.tm['dd'] = list()
self.tm['hr'] = list()
self.tm['mh'] = list()
self.tm['durac'] = list()
self.tm['rede'] = list()
self.tm['patamar'] = list()
# listas referentes ao dicionário SIST
self.sist['mne'] = list()
self.sist['num'] = list()
self.sist['mne_iden'] = list()
self.sist['flag'] = list()
self.sist['nome'] = list()
# listas referentes ao dicionário REE
self.ree['mne'] = list()
self.ree['num_ree'] = list()
self.ree['num_sub'] = list()
self.ree['nome'] = list()
# listas referentes ao dicionário UH
self.uh['mne'] = list()
self.uh['ind'] = list()
self.uh['nome'] = list()
self.uh['ss'] = list()
self.uh['vinic'] = list()
self.uh['evap'] = list()
self.uh['di'] = list()
self.uh['hi'] = list()
self.uh['m'] = list()
self.uh['vmor'] = list()
self.uh['prod'] = list()
self.uh['rest'] = list()
# listas referentes ao dicionário TVIAG
self.tviag['mne'] = list()
self.tviag['mont'] = list()
self.tviag['jus'] = list()
self.tviag['tp'] = list()
self.tviag['hr'] = list()
self.tviag['tpTviag'] = list()
# listas referentes ao dicionário UT
self.ut['mne'] = list()
self.ut['num'] = list()
self.ut['nome'] = list()
self.ut['ss'] = list()
self.ut['flag'] = list()
self.ut['di'] = list()
self.ut['hi'] = list()
self.ut['mi'] = list()
self.ut['df'] = list()
self.ut['hf'] = list()
self.ut['mf'] = list()
self.ut['rest'] = list()
self.ut['gmin'] = list()
self.ut['gmax'] = list()
self.ut['g_anterior'] = list()
# listas referentes ao dicionário USIE
self.usie['mne'] = list()
self.usie['num'] = list()
self.usie['ss'] = list()
self.usie['nome'] = list()
self.usie['mont'] = list()
self.usie['jus'] = list()
self.usie['qmin'] = list()
self.usie['qmax'] = list()
self.usie['taxa_consumo'] = list()
# listas referentes ao dicionário DP
self.dp['mne'] = list()
self.dp['ss'] = list()
self.dp['di'] = list()
self.dp['hi'] = list()
self.dp['mi'] = list()
self.dp['df'] = list()
self.dp['hf'] = list()
self.dp['mf'] = list()
self.dp['demanda'] = list()
# listas referentes ao dicionário DE
self.de['mne'] = list()
self.de['nde'] = list()
self.de['di'] = list()
self.de['hi'] = list()
self.de['mi'] = list()
self.de['df'] = list()
self.de['hf'] = list()
self.de['mf'] = list()
self.de['demanda'] = list()
self.de['justific'] = list()
# listas referentes ao dicionário CD
self.cd['mne'] = list()
self.cd['is'] = list()
self.cd['cd'] = list()
self.cd['di'] = list()
self.cd['hi'] = list()
self.cd['mi'] = list()
self.cd['df'] = list()
self.cd['hf'] = list()
self.cd['mf'] = list()
self.cd['custo'] = list()
self.cd['limsup'] = list()
# listas referentes ao dicionário RI
self.ri['mne'] = list()
self.ri['di'] = list()
self.ri['hi'] = list()
self.ri['mi'] = list()
self.ri['df'] = list()
self.ri['hf'] = list()
self.ri['mf'] = list()
self.ri['gh50min'] = list()
self.ri['gh50max'] = list()
self.ri['gh60min'] = list()
self.ri['gh60max'] = list()
self.ri['ande'] = list()
# listas referentes ao dicionário IA
self.ia['mne'] = list()
self.ia['ss1'] = list()
self.ia['ss2'] = list()
self.ia['di'] = list()
self.ia['hi'] = list()
self.ia['mi'] = list()
self.ia['df'] = list()
self.ia['hf'] = list()
self.ia['mf'] = list()
self.ia['ss1_ss2'] = list()
self.ia['ss2_ss1'] = list()
# listas referentes ao dicionário RD
self.rd['mne'] = list()
self.rd['flag_fol'] = list()
self.rd['ncirc'] = list()
self.rd['dbar'] = list()
self.rd['lim'] = list()
self.rd['dlin'] = list()
self.rd['perd'] = list()
self.rd['formato'] = list()
# listas referentes ao dicionário RIVAR
self.rivar['mne'] = list()
self.rivar['num'] = list()
self.rivar['ss'] = list()
self.rivar['cod'] = list()
self.rivar['penalidade'] = list()
# listas referentes ao dicionário IT
self.it['mne'] = list()
self.it['num'] = list()
self.it['coef'] = list()
# listas referentes ao dicionário GP
self.gp['mne'] = list()
self.gp['tol_conv'] = list()
self.gp['tol_prob'] = list()
# listas referentes ao dicionário NI
self.ni['mne'] = list()
self.ni['flag'] = list()
self.ni['nmax'] = list()
# listas referentes ao dicionário VE
self.ve['mne'] = list()
self.ve['ind'] = list()
self.ve['di'] = list()
self.ve['hi'] = list()
self.ve['mi'] = list()
self.ve['df'] = list()
self.ve['hf'] = list()
self.ve['mf'] = list()
self.ve['vol'] = list()
# listas referentes ao dicionário CI/CE
self.ci_ce['mne'] = list()
self.ci_ce['num'] = list()
self.ci_ce['nome'] = list()
self.ci_ce['ss_busf'] = list()
self.ci_ce['flag'] = list()
self.ci_ce['di'] = list()
self.ci_ce['hi'] = list()
self.ci_ce['mi'] = list()
self.ci_ce['df'] = list()
self.ci_ce['hf'] = list()
self.ci_ce['mf'] = list()
self.ci_ce['unid'] = list()
self.ci_ce['linf'] = list()
self.ci_ce['lsup'] = list()
self.ci_ce['custo'] = list()
self.ci_ce['energia'] = list()
# listas referentes ao dicionário RE
self.re['mne'] = list()
self.re['ind'] = list()
self.re['di'] = list()
self.re['hi'] = list()
self.re['mi'] = list()
self.re['df'] = list()
self.re['hf'] = list()
self.re['mf'] = list()
# listas referentes ao dicionário LU
self.lu['mne'] = list()
self.lu['ind'] = list()
self.lu['di'] = list()
self.lu['hi'] = list()
self.lu['mi'] = list()
self.lu['df'] = list()
self.lu['hf'] = list()
self.lu['mf'] = list()
self.lu['linf'] = list()
self.lu['lsup'] = list()
# listas referentes ao dicionário FH
self.fh['mne'] = list()
self.fh['ind'] = list()
self.fh['di'] = list()
self.fh['hi'] = list()
self.fh['mi'] = list()
self.fh['df'] = list()
self.fh['hf'] = list()
self.fh['mf'] = list()
self.fh['ush'] = list()
self.fh['unh'] = list()
self.fh['fator'] = list()
# listas referentes ao dicionário FT
self.ft['mne'] = list()
self.ft['ind'] = list()
self.ft['di'] = list()
self.ft['hi'] = list()
self.ft['mi'] = list()
self.ft['df'] = list()
self.ft['hf'] = list()
self.ft['mf'] = list()
self.ft['ust'] = list()
self.ft['fator'] = list()
# listas referentes ao dicionário FI
self.fi['mne'] = list()
self.fi['ind'] = list()
self.fi['di'] = list()
self.fi['hi'] = list()
self.fi['mi'] = list()
self.fi['df'] = list()
self.fi['hf'] = list()
self.fi['mf'] = list()
self.fi['ss1'] = list()
self.fi['ss2'] = list()
self.fi['fator'] = list()
# listas referentes ao dicionário FE
self.fe['mne'] = list()
self.fe['ind'] = list()
self.fe['di'] = list()
self.fe['hi'] = list()
self.fe['mi'] = list()
self.fe['df'] = list()
self.fe['hf'] = list()
self.fe['mf'] = list()
self.fe['num_contrato'] = list()
self.fe['fator'] = list()
# listas referentes ao dicionário FR
self.fr['mne'] = list()
self.fr['ind'] = list()
self.fr['di'] = list()
self.fr['hi'] = list()
self.fr['mi'] = list()
self.fr['df'] = list()
self.fr['hf'] = list()
self.fr['mf'] = list()
self.fr['useol'] = list()
self.fr['fator'] = list()
# listas referentes ao dicionário FC
self.fc['mne'] = list()
self.fc['ind'] = list()
self.fc['di'] = list()
self.fc['hi'] = list()
self.fc['mi'] = list()
self.fc['df'] = list()
self.fc['hf'] = list()
self.fc['mf'] = list()
self.fc['demanda'] = list()
self.fc['fator'] = list()
# listas referentes ao dicionário AC
self.ac['mne'] = list()
self.ac['usi'] = list()
self.ac['mneumonico'] = list()
self.ac['ind'] = list()
self.ac['valor'] = list()
# listas referentes ao dicionário DA
self.da['mne'] = list()
self.da['ind'] = list()
self.da['di'] = list()
self.da['hi'] = list()
self.da['mi'] = list()
self.da['df'] = list()
self.da['hf'] = list()
self.da['mf'] = list()
self.da['taxa'] = list()
self.da['obs'] = list()
# listas referentes ao dicionário FP
self.fp['mne'] = list()
self.fp['usi'] = list()
self.fp['f'] = list()
self.fp['nptQ'] = list()
self.fp['nptV'] = list()
self.fp['concavidade'] = list()
self.fp['min_quadraticos'] = list()
self.fp['deltaV'] = list()
self.fp['tr'] = list()
# listas referentes ao dicionário EZ
self.ez['mne'] = list()
self.ez['usi'] = list()
self.ez['perc_vol'] = list()
# listas referentes ao dicionário AG
self.ag['mne'] = list()
self.ag['num_estagios'] = list()
# listas referentes ao dicionário MH
self.mh['mne'] = list()
self.mh['num'] = list()
self.mh['gr'] = list()
self.mh['id'] = list()
self.mh['di'] = list()
self.mh['hi'] = list()
self.mh['mi'] = list()
self.mh['df'] = list()
self.mh['hf'] = list()
self.mh['mf'] = list()
self.mh['f'] = list()
# listas referentes ao dicionário MT
self.mt['mne'] = list()
self.mt['ute'] = list()
self.mt['ug'] = list()
self.mt['di'] = list()
self.mt['hi'] = list()
self.mt['mi'] = list()
self.mt['df'] = list()
self.mt['hf'] = list()
self.mt['mf'] = list()
self.mt['f'] = list()
# listas referentes ao dicionário TX
self.tx['mne'] = list()
self.tx['taxa_fcf'] = list()
# listas referentes ao dicionário PQ
self.pq['mne'] = list()
self.pq['ind'] = list()
self.pq['nome'] = list()
self.pq['ss/b'] = list()
self.pq['di'] = list()
self.pq['hi'] = list()
self.pq['mi'] = list()
self.pq['df'] = list()
self.pq['hf'] = list()
self.pq['mf'] = list()
self.pq['geracao'] = list()
# listas referentes ao dicionário SECR
self.secr['mne'] = list()
self.secr['num'] = list()
self.secr['nome'] = list()
self.secr['usi_1'] = list()
self.secr['fator_1'] = list()
self.secr['usi_2'] = list()
self.secr['fator_2'] = list()
self.secr['usi_3'] = list()
self.secr['fator_3'] = list()
self.secr['usi_4'] = list()
self.secr['fator_4'] = list()
self.secr['usi_5'] = list()
self.secr['fator_5'] = list()
# listas referentes ao dicionário CR
self.cr['mne'] = list()
self.cr['num'] = list()
self.cr['nome'] = list()
self.cr['gr'] = list()
self.cr['A0'] = list()
self.cr['A1'] = list()
self.cr['A2'] = list()
self.cr['A3'] = list()
self.cr['A4'] = list()
self.cr['A5'] = list()
self.cr['A6'] = list()
# listas referentes ao dicionário R11
self.r11['mne'] = list()
self.r11['di'] = list()
self.r11['hi'] = list()
self.r11['mi'] = list()
self.r11['df'] = list()
self.r11['hf'] = list()
self.r11['mf'] = list()
self.r11['cotaIni'] = list()
self.r11['varhora'] = list()
self.r11['vardia'] = list()
self.r11['coef'] = list()
# listas referentes ao dicionário VR
self.vr['mne'] = list()
self.vr['dia'] = list()
self.vr['mneumo_verao'] = list()
# listas referentes ao dicionário PD
self.pd['mne'] = list()
self.pd['tol_perc'] = list()
self.pd['tol_MW'] = list()
# listas referentes ao dicionário VM
self.vm['mne'] = list()
self.vm['ind'] = list()
self.vm['di'] = list()
self.vm['hi'] = list()
self.vm['mi'] = list()
self.vm['df'] = list()
self.vm['hf'] = list()
self.vm['mf'] = list()
self.vm['taxa_enchimento'] = list()
# listas referentes ao dicionário DF
self.df['mne'] = list()
self.df['ind'] = list()
self.df['di'] = list()
self.df['hi'] = list()
self.df['mi'] = list()
self.df['df'] = list()
self.df['hf'] = list()
self.df['mf'] = list()
self.df['taxa_descarga'] = list()
# listas referentes ao dicionário ME
self.me['mne'] = list()
self.me['ind'] = list()
self.me['di'] = list()
self.me['hi'] = list()
self.me['mi'] = list()
self.me['df'] = list()
self.me['hf'] = list()
self.me['mf'] = list()
self.me['fator'] = list()
# listas referentes ao dicionário META CJSIST
self.meta_cjsist['mneumo'] = list()
self.meta_cjsist['ind'] = list()
self.meta_cjsist['nome'] = list()
# listas referentes ao dicionário META SIST
self.meta_sist['mne'] = list()
self.meta_sist['ind'] = list()
self.meta_sist['tp'] = list()
self.meta_sist['num'] = list()
self.meta_sist['meta'] = list()
self.meta_sist['tol_MW'] = list()
self.meta_sist['tol_perc'] = list()
# listas referentes ao dicionário META USIT
self.meta_usit['mne'] = list()
self.meta_usit['ind'] = list()
self.meta_usit['tp'] = list()
self.meta_usit['num'] = list()
self.meta_usit['meta'] = list()
self.meta_usit['tol_MW'] = list()
self.meta_usit['tol_perc'] = list()
# listas referentes ao dicionário SH
self.sh['mne'] = list()
self.sh['flag_simul'] = list()
self.sh['flag_pl'] = list()
self.sh['num_min'] = list()
self.sh['num_max'] = list()
self.sh['flag_quebra'] = list()
self.sh['ind_1'] = list()
self.sh['ind_2'] = list()
self.sh['ind_3'] = list()
self.sh['ind_4'] = list()
self.sh['ind_5'] = list()
# listas referentes ao dicionário TF
self.tf['mne'] = list()
self.tf['custo'] = list()
# listas referentes ao dicionário RS
self.rs['mne'] = list()
self.rs['cod'] = list()
self.rs['ind'] = list()
self.rs['subs'] = list()
self.rs['tp'] = list()
self.rs['comentario'] = list()
# listas referentes ao dicionário SP
self.sp['mne'] = list()
self.sp['flag'] = list()
# listas referentes ao dicionário PS
self.ps['mne'] = list()
self.ps['flag'] = list()
# listas referentes ao dicionário PP
self.pp['mne'] = list()
self.pp['flag'] = list()
self.pp['iteracoes'] = list()
self.pp['num'] = list()
self.pp['tp'] = list()
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
continua = True
while continua:
self.next_line(f)
linha = self.linha
if linha[0] == COMENTARIO:
self.comentarios.append(linha)
self.entdados.append(linha)
continue
mne = linha[:6].strip().lower()
mne_sigla = linha[:3].strip().lower()
mneumo = linha[:13].strip().lower()
self.entdados.append(linha[:6])
                    # Read the data according to the corresponding mnemonic
if mne_sigla == 'tm':
self.tm['mne'].append(self.linha[:2])
self.tm['dd'].append(self.linha[4:6])
self.tm['hr'].append(self.linha[9:11])
self.tm['mh'].append(self.linha[14:15])
self.tm['durac'].append(self.linha[19:24])
self.tm['rede'].append(self.linha[29:30])
self.tm['patamar'].append(self.linha[33:39])
continue
if mne == 'sist':
self.sist['mne'].append(self.linha[:6])
self.sist['num'].append(self.linha[7:9])
self.sist['mne_iden'].append(self.linha[10:12])
self.sist['flag'].append(self.linha[13:15])
self.sist['nome'].append(self.linha[16:26])
continue
if mne == 'ree':
self.ree['mne'].append(self.linha[:3])
self.ree['num_ree'].append(self.linha[6:8])
self.ree['num_sub'].append(self.linha[9:11])
self.ree['nome'].append(self.linha[12:22])
continue
if mne_sigla == 'uh':
self.uh['mne'].append(self.linha[:2])
self.uh['ind'].append(self.linha[4:7])
self.uh['nome'].append(self.linha[9:21])
self.uh['ss'].append(self.linha[24:26])
self.uh['vinic'].append(self.linha[29:39])
self.uh['evap'].append(self.linha[39:40])
self.uh['di'].append(self.linha[41:43])
self.uh['hi'].append(self.linha[44:46])
self.uh['m'].append(self.linha[47:48])
self.uh['vmor'].append(self.linha[49:59])
self.uh['prod'].append(self.linha[64:65])
self.uh['rest'].append(self.linha[69:70])
continue
if mne == 'tviag':
self.tviag['mne'].append(self.linha[:6])
self.tviag['mont'].append(self.linha[6:9])
self.tviag['jus'].append(self.linha[10:13])
self.tviag['tp'].append(self.linha[14:15])
self.tviag['hr'].append(self.linha[19:22])
self.tviag['tpTviag'].append(self.linha[24:25])
continue
if mne_sigla == 'ut':
self.ut['mne'].append(self.linha[:2])
self.ut['num'].append(self.linha[4:7])
self.ut['nome'].append(self.linha[9:21])
self.ut['ss'].append(self.linha[22:24])
self.ut['flag'].append(self.linha[25:26])
self.ut['di'].append(self.linha[27:29])
self.ut['hi'].append(self.linha[30:32])
self.ut['mi'].append(self.linha[33:34])
self.ut['df'].append(self.linha[35:37])
self.ut['hf'].append(self.linha[38:40])
self.ut['mf'].append(self.linha[41:42])
self.ut['rest'].append(self.linha[46:47])
self.ut['gmin'].append(self.linha[47:57])
self.ut['gmax'].append(self.linha[57:67])
self.ut['g_anterior'].append(self.linha[67:77])
continue
if mne == 'usie':
self.usie['mne'].append(self.linha[:4])
self.usie['num'].append(self.linha[5:8])
self.usie['ss'].append(self.linha[9:11])
self.usie['nome'].append(self.linha[14:26])
self.usie['mont'].append(self.linha[29:32])
self.usie['jus'].append(self.linha[34:37])
self.usie['qmin'].append(self.linha[39:49])
self.usie['qmax'].append(self.linha[49:59])
self.usie['taxa_consumo'].append(self.linha[59:69])
continue
if mne_sigla == 'dp':
self.dp['mne'].append(self.linha[:2])
self.dp['ss'].append(self.linha[4:6])
self.dp['di'].append(self.linha[8:10])
self.dp['hi'].append(self.linha[11:13])
self.dp['mi'].append(self.linha[14:15])
self.dp['df'].append(self.linha[16:18])
self.dp['hf'].append(self.linha[19:21])
self.dp['mf'].append(self.linha[22:23])
self.dp['demanda'].append(self.linha[24:34])
continue
if mne_sigla == 'de':
self.de['mne'].append(self.linha[:2])
self.de['nde'].append(self.linha[4:7])
self.de['di'].append(self.linha[8:10])
self.de['hi'].append(self.linha[11:13])
self.de['mi'].append(self.linha[14:15])
self.de['df'].append(self.linha[16:18])
self.de['hf'].append(self.linha[19:21])
self.de['mf'].append(self.linha[22:23])
self.de['demanda'].append(self.linha[24:34])
self.de['justific'].append(self.linha[35:45])
continue
if mne_sigla == 'cd':
self.cd['mne'].append(self.linha[:2])
self.cd['is'].append(self.linha[3:5])
self.cd['cd'].append(self.linha[6:8])
self.cd['di'].append(self.linha[9:11])
self.cd['hi'].append(self.linha[12:14])
self.cd['mi'].append(self.linha[15:16])
self.cd['df'].append(self.linha[17:19])
self.cd['hf'].append(self.linha[20:22])
self.cd['mf'].append(self.linha[23:24])
self.cd['custo'].append(self.linha[25:35])
self.cd['limsup'].append(self.linha[35:45])
continue
if mne_sigla == 'ri':
self.ri['mne'].append(self.linha[:2])
self.ri['di'].append(self.linha[8:10])
self.ri['hi'].append(self.linha[11:13])
self.ri['mi'].append(self.linha[14:15])
self.ri['df'].append(self.linha[16:18])
self.ri['hf'].append(self.linha[19:21])
self.ri['mf'].append(self.linha[22:23])
self.ri['gh50min'].append(self.linha[26:36])
self.ri['gh50max'].append(self.linha[36:46])
self.ri['gh60min'].append(self.linha[46:56])
self.ri['gh60max'].append(self.linha[56:66])
self.ri['ande'].append(self.linha[66:76])
continue
if mne_sigla == 'ia':
self.ia['mne'].append(self.linha[:2])
self.ia['ss1'].append(self.linha[4:6])
self.ia['ss2'].append(self.linha[9:11])
self.ia['di'].append(self.linha[13:15])
self.ia['hi'].append(self.linha[16:18])
self.ia['mi'].append(self.linha[19:20])
self.ia['df'].append(self.linha[21:23])
self.ia['hf'].append(self.linha[24:26])
self.ia['mf'].append(self.linha[27:28])
self.ia['ss1_ss2'].append(self.linha[29:39])
self.ia['ss2_ss1'].append(self.linha[39:49])
continue
if mne_sigla == 'rd':
self.rd['mne'].append(self.linha[:2])
self.rd['flag_fol'].append(self.linha[4:5])
self.rd['ncirc'].append(self.linha[8:12])
self.rd['dbar'].append(self.linha[14:15])
self.rd['lim'].append(self.linha[16:17])
self.rd['dlin'].append(self.linha[18:19])
self.rd['perd'].append(self.linha[20:21])
self.rd['formato'].append(self.linha[22:23])
continue
if mne == 'rivar':
self.rivar['mne'].append(self.linha[:5])
self.rivar['num'].append(self.linha[7:10])
self.rivar['ss'].append(self.linha[11:14])
self.rivar['cod'].append(self.linha[15:17])
self.rivar['penalidade'].append(self.linha[19:29])
continue
if mne_sigla == 'it':
self.it['mne'].append(self.linha[:2])
self.it['num'].append(self.linha[4:6])
self.it['coef'].append(self.linha[9:84])
continue
if mne_sigla == 'gp':
self.gp['mne'].append(self.linha[:2])
self.gp['tol_conv'].append(self.linha[4:14])
self.gp['tol_prob'].append(self.linha[15:25])
continue
if mne_sigla == 'ni':
self.ni['mne'].append(self.linha[:2])
self.ni['flag'].append(self.linha[4:5])
self.ni['nmax'].append(self.linha[9:12])
continue
if mne_sigla == 've':
self.ve['mne'].append(self.linha[:2])
self.ve['ind'].append(self.linha[4:7])
self.ve['di'].append(self.linha[8:10])
self.ve['hi'].append(self.linha[11:13])
self.ve['mi'].append(self.linha[14:15])
self.ve['df'].append(self.linha[16:18])
self.ve['hf'].append(self.linha[19:21])
self.ve['mf'].append(self.linha[22:23])
self.ve['vol'].append(self.linha[24:34])
continue
if mne_sigla == 'ci' or mne_sigla == 'ce':
self.ci_ce['mne'].append(self.linha[:2])
self.ci_ce['num'].append(self.linha[3:6])
self.ci_ce['nome'].append(self.linha[7:17])
self.ci_ce['ss_busf'].append(self.linha[18:23])
self.ci_ce['flag'].append(self.linha[23:24])
self.ci_ce['di'].append(self.linha[25:27])
self.ci_ce['hi'].append(self.linha[28:30])
self.ci_ce['mi'].append(self.linha[31:32])
self.ci_ce['df'].append(self.linha[33:35])
self.ci_ce['hf'].append(self.linha[36:38])
self.ci_ce['mf'].append(self.linha[39:40])
self.ci_ce['unid'].append(self.linha[41:42])
self.ci_ce['linf'].append(self.linha[43:53])
self.ci_ce['lsup'].append(self.linha[53:63])
self.ci_ce['custo'].append(self.linha[63:73])
self.ci_ce['energia'].append(self.linha[73:83])
continue
if mne_sigla == 're':
self.re['mne'].append(self.linha[:2])
self.re['ind'].append(self.linha[4:7])
self.re['di'].append(self.linha[9:11])
self.re['hi'].append(self.linha[12:14])
self.re['mi'].append(self.linha[15:16])
self.re['df'].append(self.linha[17:19])
self.re['hf'].append(self.linha[20:22])
self.re['mf'].append(self.linha[23:24])
continue
if mne_sigla == 'lu':
self.lu['mne'].append(self.linha[:2])
self.lu['ind'].append(self.linha[4:7])
self.lu['di'].append(self.linha[8:10])
self.lu['hi'].append(self.linha[11:13])
self.lu['mi'].append(self.linha[14:15])
self.lu['df'].append(self.linha[16:18])
self.lu['hf'].append(self.linha[19:21])
self.lu['mf'].append(self.linha[22:23])
self.lu['linf'].append(self.linha[24:34])
self.lu['lsup'].append(self.linha[34:44])
continue
if mne_sigla == 'fh':
self.fh['mne'].append(self.linha[:2])
self.fh['ind'].append(self.linha[4:7])
self.fh['di'].append(self.linha[8:10])
self.fh['hi'].append(self.linha[11:13])
self.fh['mi'].append(self.linha[14:15])
self.fh['df'].append(self.linha[16:18])
self.fh['hf'].append(self.linha[19:21])
self.fh['mf'].append(self.linha[22:23])
self.fh['ush'].append(self.linha[24:27])
self.fh['unh'].append(self.linha[27:29])
self.fh['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'ft':
self.ft['mne'].append(self.linha[:2])
self.ft['ind'].append(self.linha[4:7])
self.ft['di'].append(self.linha[8:10])
self.ft['hi'].append(self.linha[11:13])
self.ft['mi'].append(self.linha[14:15])
self.ft['df'].append(self.linha[16:18])
self.ft['hf'].append(self.linha[19:21])
self.ft['mf'].append(self.linha[22:23])
self.ft['ust'].append(self.linha[24:27])
self.ft['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fi':
self.fi['mne'].append(self.linha[:2])
self.fi['ind'].append(self.linha[4:7])
self.fi['di'].append(self.linha[8:10])
self.fi['hi'].append(self.linha[11:13])
self.fi['mi'].append(self.linha[14:15])
self.fi['df'].append(self.linha[16:18])
self.fi['hf'].append(self.linha[19:21])
self.fi['mf'].append(self.linha[22:23])
self.fi['ss1'].append(self.linha[24:26])
self.fi['ss2'].append(self.linha[29:31])
self.fi['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fe':
self.fe['mne'].append(self.linha[:2])
self.fe['ind'].append(self.linha[4:7])
self.fe['di'].append(self.linha[8:10])
self.fe['hi'].append(self.linha[11:13])
self.fe['mi'].append(self.linha[14:15])
self.fe['df'].append(self.linha[16:18])
self.fe['hf'].append(self.linha[19:21])
self.fe['mf'].append(self.linha[22:23])
self.fe['num_contrato'].append(self.linha[24:27])
self.fe['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fr':
self.fr['mne'].append(self.linha[:2])
self.fr['ind'].append(self.linha[4:9])
self.fr['di'].append(self.linha[10:12])
self.fr['hi'].append(self.linha[13:15])
self.fr['mi'].append(self.linha[16:17])
self.fr['df'].append(self.linha[18:20])
self.fr['hf'].append(self.linha[21:23])
self.fr['mf'].append(self.linha[24:25])
self.fr['useol'].append(self.linha[26:31])
self.fr['fator'].append(self.linha[36:46])
continue
if mne_sigla == 'fc':
self.fc['mne'].append(self.linha[:2])
self.fc['ind'].append(self.linha[4:7])
self.fc['di'].append(self.linha[10:12])
self.fc['hi'].append(self.linha[13:15])
self.fc['mi'].append(self.linha[16:17])
self.fc['df'].append(self.linha[18:20])
self.fc['hf'].append(self.linha[21:23])
self.fc['mf'].append(self.linha[24:25])
self.fc['demanda'].append(self.linha[26:29])
self.fc['fator'].append(self.linha[36:46])
continue
if mne_sigla == 'ac':
self.ac['mne'].append(self.linha[:2])
self.ac['usi'].append(self.linha[4:7])
self.ac['mneumonico'].append(self.linha[9:15])
self.ac['ind'].append(self.linha[15:19])
self.ac['valor'].append(self.linha[19:])
continue
if mne_sigla == 'da':
self.da['mne'].append(self.linha[:2])
self.da['ind'].append(self.linha[4:7])
self.da['di'].append(self.linha[8:10])
self.da['hi'].append(self.linha[11:13])
self.da['mi'].append(self.linha[14:15])
self.da['df'].append(self.linha[16:18])
self.da['hf'].append(self.linha[19:21])
self.da['mf'].append(self.linha[22:23])
self.da['taxa'].append(self.linha[24:34])
self.da['obs'].append(self.linha[35:47])
continue
if mne_sigla == 'fp':
self.fp['mne'].append(self.linha[:2])
self.fp['usi'].append(self.linha[3:6])
self.fp['f'].append(self.linha[7:8])
self.fp['nptQ'].append(self.linha[10:13])
self.fp['nptV'].append(self.linha[15:18])
self.fp['concavidade'].append(self.linha[20:21])
self.fp['min_quadraticos'].append(self.linha[24:25])
self.fp['deltaV'].append(self.linha[29:39])
self.fp['tr'].append(self.linha[39:49])
continue
if mne_sigla == 'ez':
self.ez['mne'].append(self.linha[:2])
self.ez['usi'].append(self.linha[4:7])
self.ez['perc_vol'].append(self.linha[9:14])
continue
if mne_sigla == 'ag':
self.ag['mne'].append(self.linha[:2])
self.ag['num_estagios'].append(self.linha[3:6])
continue
if mne_sigla == 'mh':
self.mh['mne'].append(self.linha[:2])
self.mh['num'].append(self.linha[4:7])
self.mh['gr'].append(self.linha[9:11])
self.mh['id'].append(self.linha[12:14])
self.mh['di'].append(self.linha[14:16])
self.mh['hi'].append(self.linha[17:19])
self.mh['mi'].append(self.linha[20:21])
self.mh['df'].append(self.linha[22:24])
self.mh['hf'].append(self.linha[25:27])
self.mh['mf'].append(self.linha[28:29])
self.mh['f'].append(self.linha[30:31])
continue
if mne_sigla == 'mt':
self.mt['mne'].append(self.linha[:2])
self.mt['ute'].append(self.linha[4:7])
self.mt['ug'].append(self.linha[8:11])
self.mt['di'].append(self.linha[13:15])
self.mt['hi'].append(self.linha[16:18])
self.mt['mi'].append(self.linha[19:20])
self.mt['df'].append(self.linha[21:23])
self.mt['hf'].append(self.linha[24:26])
self.mt['mf'].append(self.linha[27:28])
self.mt['f'].append(self.linha[29:30])
continue
if mne_sigla == 'tx':
self.tx['mne'].append(self.linha[:2])
self.tx['taxa_fcf'].append(self.linha[4:14])
continue
if mne_sigla == 'pq':
self.pq['mne'].append(self.linha[:2])
self.pq['ind'].append(self.linha[4:7])
self.pq['nome'].append(self.linha[9:19])
self.pq['ss/b'].append(self.linha[19:24])
self.pq['di'].append(self.linha[24:26])
self.pq['hi'].append(self.linha[27:29])
self.pq['mi'].append(self.linha[30:31])
self.pq['df'].append(self.linha[32:34])
self.pq['hf'].append(self.linha[35:37])
self.pq['mf'].append(self.linha[38:39])
self.pq['geracao'].append(self.linha[40:50])
continue
if mne == 'secr':
self.secr['mne'].append(self.linha[:4])
self.secr['num'].append(self.linha[5:8])
self.secr['nome'].append(self.linha[9:21])
self.secr['usi_1'].append(self.linha[24:27])
self.secr['fator_1'].append(self.linha[28:33])
self.secr['usi_2'].append(self.linha[34:37])
self.secr['fator_2'].append(self.linha[38:43])
self.secr['usi_3'].append(self.linha[44:47])
self.secr['fator_3'].append(self.linha[48:53])
self.secr['usi_4'].append(self.linha[54:57])
self.secr['fator_4'].append(self.linha[58:63])
self.secr['usi_5'].append(self.linha[64:67])
self.secr['fator_5'].append(self.linha[68:73])
continue
if mne_sigla == 'cr':
self.cr['mne'].append(self.linha[:2])
self.cr['num'].append(self.linha[4:7])
self.cr['nome'].append(self.linha[9:21])
self.cr['gr'].append(self.linha[24:26])
self.cr['A0'].append(self.linha[27:42])
self.cr['A1'].append(self.linha[43:58])
self.cr['A2'].append(self.linha[59:74])
self.cr['A3'].append(self.linha[75:90])
self.cr['A4'].append(self.linha[91:106])
self.cr['A5'].append(self.linha[107:122])
self.cr['A6'].append(self.linha[123:138])
continue
if mne_sigla == 'r11':
self.r11['mne'].append(self.linha[:3])
self.r11['di'].append(self.linha[4:6])
self.r11['hi'].append(self.linha[7:9])
self.r11['mi'].append(self.linha[10:11])
self.r11['df'].append(self.linha[12:14])
self.r11['hf'].append(self.linha[15:17])
self.r11['mf'].append(self.linha[18:19])
self.r11['cotaIni'].append(self.linha[20:30])
self.r11['varhora'].append(self.linha[30:40])
self.r11['vardia'].append(self.linha[40:50])
self.r11['coef'].append(self.linha[59:164])
continue
if mne_sigla == 'vr':
self.vr['mne'].append(self.linha[:2])
self.vr['dia'].append(self.linha[4:6])
self.vr['mneumo_verao'].append(self.linha[9:12])
continue
if mne_sigla == 'pd':
self.pd['mne'].append(self.linha[:2])
self.pd['tol_perc'].append(self.linha[3:9])
self.pd['tol_MW'].append(self.linha[12:22])
continue
if mne_sigla == 'vm':
self.vm['mne'].append(self.linha[:2])
self.vm['ind'].append(self.linha[4:7])
self.vm['di'].append(self.linha[8:10])
self.vm['hi'].append(self.linha[11:13])
self.vm['mi'].append(self.linha[14:15])
self.vm['df'].append(self.linha[16:18])
self.vm['hf'].append(self.linha[19:21])
self.vm['mf'].append(self.linha[22:23])
self.vm['taxa_enchimento'].append(self.linha[24:34])
continue
if mne_sigla == 'df':
self.df['mne'].append(self.linha[:2])
self.df['ind'].append(self.linha[4:7])
self.df['di'].append(self.linha[8:10])
self.df['hi'].append(self.linha[11:13])
self.df['mi'].append(self.linha[14:15])
self.df['df'].append(self.linha[16:18])
self.df['hf'].append(self.linha[19:21])
self.df['mf'].append(self.linha[22:23])
self.df['taxa_descarga'].append(self.linha[24:34])
continue
if mne_sigla == 'me':
self.me['mne'].append(self.linha[:2])
self.me['ind'].append(self.linha[4:7])
self.me['di'].append(self.linha[8:10])
self.me['hi'].append(self.linha[11:13])
self.me['mi'].append(self.linha[14:15])
self.me['df'].append(self.linha[16:18])
self.me['hf'].append(self.linha[19:21])
self.me['mf'].append(self.linha[22:23])
self.me['fator'].append(self.linha[24:34])
continue
if mneumo == 'meta cjsist':
self.meta_cjsist['mneumo'].append(self.linha[:13])
self.meta_cjsist['ind'].append(self.linha[14:17])
self.meta_cjsist['nome'].append(self.linha[18:20])
continue
if mneumo == 'meta receb':
self.meta_sist['mne'].append(self.linha[:13])
self.meta_sist['ind'].append(self.linha[14:17])
self.meta_sist['tp'].append(self.linha[19:21])
self.meta_sist['num'].append(self.linha[22:23])
self.meta_sist['meta'].append(self.linha[24:34])
self.meta_sist['tol_MW'].append(self.linha[34:44])
self.meta_sist['tol_perc'].append(self.linha[44:54])
continue
if mneumo == 'meta gter':
self.meta_usit['mne'].append(self.linha[:13])
self.meta_usit['ind'].append(self.linha[14:17])
self.meta_usit['tp'].append(self.linha[19:21])
self.meta_usit['num'].append(self.linha[22:23])
self.meta_usit['meta'].append(self.linha[24:34])
self.meta_usit['tol_MW'].append(self.linha[34:44])
self.meta_usit['tol_perc'].append(self.linha[44:54])
continue
if mne_sigla == 'sh':
self.sh['mne'].append(self.linha[:2])
self.sh['flag_simul'].append(self.linha[4:5])
self.sh['flag_pl'].append(self.linha[9:10])
self.sh['num_min'].append(self.linha[14:17])
self.sh['num_max'].append(self.linha[19:22])
self.sh['flag_quebra'].append(self.linha[24:25])
self.sh['ind_1'].append(self.linha[29:32])
self.sh['ind_2'].append(self.linha[34:37])
self.sh['ind_3'].append(self.linha[39:42])
self.sh['ind_4'].append(self.linha[44:47])
self.sh['ind_5'].append(self.linha[49:52])
continue
if mne_sigla == 'tf':
self.tf['mne'].append(self.linha[:2])
self.tf['custo'].append(self.linha[4:14])
continue
if mne_sigla == 'rs':
self.rs['mne'].append(self.linha[:2])
self.rs['cod'].append(self.linha[3:6])
self.rs['ind'].append(self.linha[7:11])
self.rs['subs'].append(self.linha[12:16])
self.rs['tp'].append(self.linha[22:26])
self.rs['comentario'].append(self.linha[27:39])
continue
if mne_sigla == 'sp':
self.sp['mne'].append(self.linha[:2])
self.sp['flag'].append(self.linha[4:5])
continue
if mne_sigla == 'ps':
self.ps['mne'].append(self.linha[:2])
self.ps['flag'].append(self.linha[4:5])
continue
if mne_sigla == 'pp':
self.pp['mne'].append(self.linha[:2])
self.pp['flag'].append(self.linha[3:4])
self.pp['iteracoes'].append(self.linha[5:8])
self.pp['num'].append(self.linha[9:12])
self.pp['tp'].append(self.linha[13:14])
continue
except Exception as err:
if isinstance(err, StopIteration):
self.bloco_tm['df'] = pd.DataFrame(self.tm)
self.bloco_sist['df'] = pd.DataFrame(self.sist)
self.bloco_ree['df'] = pd.DataFrame(self.ree)
self.bloco_uh['df'] = pd.DataFrame(self.uh)
self.bloco_tviag['df'] = pd.DataFrame(self.tviag)
self.bloco_ut['df'] = pd.DataFrame(self.ut)
self.bloco_usie['df'] = pd.DataFrame(self.usie)
self.bloco_dp['df'] = pd.DataFrame(self.dp)
self.bloco_de['df'] = pd.DataFrame(self.de)
self.bloco_cd['df'] = pd.DataFrame(self.cd)
self.bloco_ri['df'] = pd.DataFrame(self.ri)
self.bloco_ia['df'] = pd.DataFrame(self.ia)
self.bloco_rd['df'] = pd.DataFrame(self.rd)
self.bloco_rivar['df'] = pd.DataFrame(self.rivar)
self.bloco_it['df'] = pd.DataFrame(self.it)
self.bloco_gp['df'] = pd.DataFrame(self.gp)
self.bloco_ni['df'] = pd.DataFrame(self.ni)
self.bloco_ve['df'] = pd.DataFrame(self.ve)
self.bloco_ci_ce['df'] = pd.DataFrame(self.ci_ce)
self.bloco_re['df'] = pd.DataFrame(self.re)
self.bloco_lu['df'] = pd.DataFrame(self.lu)
self.bloco_fh['df'] = pd.DataFrame(self.fh)
self.bloco_ft['df'] = | pd.DataFrame(self.ft) | pandas.DataFrame |
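# A minimal, self-contained sketch (not part of the parser above) of the pattern it
# implements: fixed-width slices from each record line are accumulated into a dict of
# lists and turned into a DataFrame once the input is exhausted. The column names and
# slice positions below are illustrative assumptions only.
import pandas as pd

def parse_fixed_width(lines):
    registro = {'mne': [], 'num': [], 'valor': []}
    for linha in lines:
        registro['mne'].append(linha[:2])      # record mnemonic
        registro['num'].append(linha[4:7])     # element index
        registro['valor'].append(linha[8:18])  # value field
    return pd.DataFrame(registro)

# parse_fixed_width(["cr  001 10.5", "cr  002 11.0"]) -> 2-row DataFrame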
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from validada.slicers import iloc
import validada.functions.raising as ck
import validada.decorators.raising as dc
import datetime as dt
def _add_one(df):
return df + 1
def _safe_add_one(df):
return df.fillna(0.0) + 1
def _noop(df):
return df
def test_is_in_index():
dr = pd.date_range(start='2015-01-01', periods=6, freq='D')
df = pd.DataFrame(data = list(range(6)), index=dr)
d = dt.date(2015,1,3)
result = ck.has_in_index(df, obj=d)
tm.assert_frame_equal(df, result)
result = dc.has_in_index(obj=d)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
result = ck.has_in_index(df, obj=d, try_ix=True)
result = ck.has_in_index(df, obj=d, try_ix=True, try_strftime="%Y-%m")
result = ck.has_in_index(df, obj=d, check_na=True)
def test_is_in_index_raises():
dr = pd.date_range(start='2015-01-01', periods=6, freq='D')
da = list(range(6))
da[2] = np.nan
df = pd.DataFrame(data = da, index=dr)
d = dt.date(2015,1,12)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d)
with pytest.raises(AssertionError):
dc.has_in_index(obj=d)(_add_one)(df)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d, try_ix=True)
ck.has_in_index(df, obj=d, try_ix=True, try_strftime="%Y-%m")
d = dt.datetime(2015,1,3)
ck.has_in_index(df, obj=d)
ck.has_in_index(df, obj=d, check_na=False)
with pytest.raises(AssertionError):
ck.has_in_index(df, obj=d, check_na=True)
def test_equal_columns_sum():
df = pd.DataFrame({'A': [1,2,3,4,5], 'B': [1,2,3,4,5]})
result = ck.equal_columns_sum(df)
tm.assert_frame_equal(df, result)
result = dc.equal_columns_sum()(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
def test_equal_columns_sum_raises_slice():
df = pd.DataFrame({'A': [None,2,3,4,0], 'B': [1,2,3,4,None]})
with pytest.raises(AssertionError):
ck.equal_columns_sum(df)
with pytest.raises(AssertionError):
dc.equal_columns_sum()(_add_one)(df)
s = iloc[-3:]
result = ck.equal_columns_sum(df, s)
tm.assert_frame_equal(df, result)
result = dc.equal_columns_sum(s)(_safe_add_one)(df)
tm.assert_frame_equal(result, _safe_add_one(df))
def test_none_missing():
df = pd.DataFrame(np.random.randn(5, 3))
result = ck.none_missing(df)
tm.assert_frame_equal(df, result)
result = dc.none_missing()(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
def test_none_missing_raises():
df = pd.DataFrame(np.random.randn(5, 3))
df.iloc[0, 0] = np.nan
with pytest.raises(AssertionError):
ck.none_missing(df)
with pytest.raises(AssertionError):
dc.none_missing()(_add_one)(df)
def test_monotonic_increasing_lax():
df = pd.DataFrame([1, 2, 2])
tm.assert_frame_equal(df, ck.is_monotonic(df, increasing=True))
result = dc.is_monotonic(increasing=True)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
df = pd.DataFrame([1, 2, 1])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=True)(_add_one)(df)
df = pd.DataFrame([3, 2, 1])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=True)(_add_one)(df)
def test_monotonic_increasing_strict():
df = pd.DataFrame([1, 2, 3])
tm.assert_frame_equal(df, ck.is_monotonic(df, increasing=True, strict=True))
result = dc.is_monotonic(increasing=True, strict=True)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
df = pd.DataFrame([1, 2, 2])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=True, strict=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=True, strict=True)(_add_one)(df)
df = pd.DataFrame([3, 2, 1])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=True, strict=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=True, strict=True)(_add_one)(df)
def test_monotonic_decreasing():
df = pd.DataFrame([2, 2, 1])
tm.assert_frame_equal(df, ck.is_monotonic(df, increasing=False))
result = dc.is_monotonic(increasing=False)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
df = pd.DataFrame([1, 2, 1])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=False)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=False)(_add_one)(df)
df = pd.DataFrame([1, 2, 3])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=False)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=False)(_add_one)(df)
def test_monotonic_decreasing_strict():
df = pd.DataFrame([3, 2, 1])
tm.assert_frame_equal(df, ck.is_monotonic(df, increasing=False,
strict=True))
result = dc.is_monotonic(increasing=False, strict=True)(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
df = pd.DataFrame([2, 2, 1])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=False, strict=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=False, strict=True)(_add_one)(df)
df = pd.DataFrame([1, 2, 3])
with pytest.raises(AssertionError):
ck.is_monotonic(df, increasing=False, strict=True)
with pytest.raises(AssertionError):
dc.is_monotonic(increasing=False, strict=True)(_add_one)(df)
def test_monotonic_either():
df = pd.DataFrame({'A': [1, 2, 2], 'B': [3, 2, 2]})
tm.assert_frame_equal(df, ck.is_monotonic(df))
result = dc.is_monotonic()(_add_one)(df)
tm.assert_frame_equal(result, df + 1)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 2, 1]})
with pytest.raises(AssertionError):
ck.is_monotonic(df)
with pytest.raises(AssertionError):
dc.is_monotonic()(_add_one)(df)
def test_monotonic_either_strict():
df = | pd.DataFrame({'A': [1, 2, 3], 'B': [3, 2, 1]}) | pandas.DataFrame |
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = | pd.date_range('1/1/2000', freq='D', periods=8, tz=tz) | pandas.date_range |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
| tm.assert_index_equal(shiftedFrame.index, datetime_frame.index) | pandas._testing.assert_index_equal |
# -*- encoding: utf-8 -*-
"""
Created by <NAME> at 04/09/2021 at 20:39:04
Project: py_dss_tools [set, 2021]
"""
import attr
import pandas as pd
from py_dss_tools.model.pdelement import PDElement
from py_dss_tools.utils import Utils
@attr.s(kw_only=True)
class Line(PDElement):
"""
_b0 = alternate way to specify C0. MicroS per unit length.
_b1 = alternate way to specify C1. MicroS per unit length.
_bus1: Name of bus to which first terminal is connected.
Example:
bus1=busname (assumes all terminals connected in normal phase order)
bus1=busname.3.1.2.0 (specify terminal to node connections explicitly)
_bus2: Name of bus to which 2nd terminal is connected.
_c0: Zero-sequence capacitance, nf per unit length. Setting any of R1, R0, X1, X0, C1, C0 forces the program to
use the symmetrical component line definition. See also B0.
_c1: Positive-sequence capacitance, nf per unit length. Setting any of R1, R0, X1, X0, C1, C0 forces the program
to use the symmetrical component line definition. See also Cmatrix and B1.
_cmatrix: Nodal Capacitance matrix, lower triangle, nf per unit length.Order of the matrix is the number of
phases. May be used to specify the shunt capacitance of any line configuration. Using any of Rmatrix, Xmatrix,
Cmatrix forces program to use the matrix values for line impedance definition. For balanced line models, you may
use the standard symmetrical component data definition instead.
_cncables = array of CNData names for use in a cable constants calculation. Must be used in conjunction with the
Spacing property. Specify the Spacing first, using "nphases" cncables. You may later specify "nconds-nphases" wires
for separate neutrals.
_earthmodel: One of {Carson | FullCarson | Deri}. Default is the global value established with the Set EarthModel
command. See the Options Help on EarthModel option. This is used to override the global value for this line. This
option applies only when the "geometry" property is used.
_geometry: Geometry code for LineGeometry Object. Supercedes any previous definition of line impedance. Line
constants are computed for each frequency change or rho change. CAUTION: may alter number of phases. You cannot
subsequently change the number of phases unless you change how the line impedance is defined.
_length: Length of line. Default is 1.0. If units do not match the impedance data, specify "units" property.
_linecode: Name of linecode object describing line impedances. If you use a line code, you do not need to specify
the impedances here. The line code must have been PREVIOUSLY defined. The values specified last will prevail over
those specified earlier (left-to-right sequence of properties). You can subsequently change the number of phases
if symmetrical component quantities are specified.If no line code or impedance data are specified, the line object
defaults to 336 MCM ACSR on 4 ft spacing.
_linetype: Code designating the type of line.
One of: OH, UG, UG_TS, UG_CN, SWT_LDBRK, SWT_FUSE, SWT_SECT, SWT_REC, SWT_DISC, SWT_BRK, SWT_ELBOW
OpenDSS currently does not use this internally. For whatever purpose the user defines. Default is OH.
_r0: Zero-sequence Resistance, ohms per unit length. Setting any of R1, R0, X1, X0, C1, C0 forces the program to
use the symmetrical component line definition.
_r1: Positive-sequence Resistance, ohms per unit length. Setting any of R1, R0, X1, X0, C1, C0 forces the program
to use the symmetrical component line definition. See also Rmatrix.
_ratings = an array of ratings to be used when the seasonal ratings flag is True. It can be used to insert multiple
ratings to change during a QSTS simulation to evaluate different ratings in lines.
_rg: Carson earth return resistance per unit length used to compute impedance values at base frequency. Default is
0.01805 = 60 Hz value in ohms per kft (matches default line impedances). This value is required for harmonic
solutions if you wish to adjust the earth return impedances for frequency. If not, set both Rg and Xg = 0.
_rho: Earth resistivity used to compute the earth correction factor. Default is 100 meter ohms. Overrides Line
geometry definition if specified.
_rmatrix: Resistance matrix, lower triangle, ohms per unit length. Order of the matrix is the number of phases.
May be used to specify the impedance of any line configuration. Using any of Rmatrix, Xmatrix, Cmatrix forces
program to use the matrix values for line impedance definition. For balanced line models, you may use the standard
symmetrical component data definition instead.
_seasons: Defines the number of ratings to be defined for the wire, to be used only when defining seasonal ratings
using the "Ratings" property.
_spacing: Reference to a LineSpacing for use in a line constants calculation. Must be used in conjunction with the
Wires property. Specify this before the wires property.
_switch: {y/n | T/F}. Default is no/false. Designates this line as a switch for graphics and algorithmic
purposes. SIDE EFFECT: Sets r1 = 1.0; x1 = 1.0; r0 = 1.0; x0 = 1.0; c1 = 1.1 ; c0 = 1.0; length = 0.001; You
must reset if you want something different.
_tscables = array of TSData names for use in a cable constants calculation. Must be used in conjunction with the
Spacing property. Specify the Spacing first, using "nphases" tscables. You may later specify "nconds-nphases" wires
for separate neutrals
_units: Length Units = {none | mi|kft|km|m|Ft|in|cm } Default is None - assumes length units match impedance units.
_wires = array of WireData names for use in an overhead line constants calculation. Must be used in conjunction with
the Spacing property. Specify the Spacing first, and "ncond" wires. May also be used to specify bare neutrals with
cables, using "ncond-nphase" wires.
_x0: Zero-sequence Reactance, ohms per unit length. Setting any of R1, R0, X1, X0, C1, C0 forces the program to
use the symmetrical component line definition.
_x1: Positive-sequence Reactance, ohms per unit length. Setting any of R1, R0, X1, X0, C1, C0 forces the program
to use the symmetrical component line definition. See also Xmatrix
_xg: Carson earth return reactance per unit length used to compute impedance values at base frequency. For making
better frequency adjustments. Default is 0.155081 = 60 Hz value in ohms per kft (matches default line impedances).
This value is required for harmonic solutions if you wish to adjust the earth return impedances for frequency. If
not, set both Rg and Xg = 0.
_xmatrix: Reactance matrix, lower triangle, ohms per unit length. Order of the matrix is the number of phases. May
be used to specify the impedance of any line configuration. Using any of Rmatrix, Xmatrix, Cmatrix forces program to
use the matrix values for line impedance definition. For balanced line models, you may use the standard
symmetrical component data definition instead.
"""
title_ = "Line"
plural_title_ = "Lines"
columns_ = ['_b0', '_b1', '_basefreq', '_bus1', '_bus2', '_c0', '_c1', '_cmatrix', '_cncables', '_earthmodel',
'_emergamps', '_enabled', '_faultrate', '_geometry', '_length', '_like', '_linecode', '_linetype',
'_name', '_normamps', '_pctperm', '_phases', '_r0', '_r1', '_ratings', '_repair', '_rg', '_rho',
'_rmatrix', '_seasons', '_spacing', '_switch', '_tscables', '_units', '_wires', '_x0', '_x1', '_xg',
'_xmatrix']
# columns_no_ = ['b0', 'b1', 'basefreq', 'bus1', 'bus2', 'c0', 'c1', 'cmatrix', 'cncables', 'earthmodel',
# 'emergamps', 'enabled', 'faultrate', 'geometry', 'length', 'like', 'linecode', 'linetype', 'name', 'normamps',
# 'pctperm', 'phases', 'r0', 'r1', 'ratings', 'repair', 'rg', 'rho', 'rmatrix', 'seasons', 'spacing', 'switch',
# 'tscables', 'units', 'wires', 'x0', 'x1', 'xg', 'xmatrix']
_b0 = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.0)
_b1 = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.0)
_bus1 = attr.ib(validator=attr.validators.instance_of(str), default='')
_bus2 = attr.ib(validator=attr.validators.instance_of(str), default='')
_c0 = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.0)
_c1 = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.1)
_cmatrix = attr.ib(validator=attr.validators.instance_of(str), default='[]')
_cncables = attr.ib(validator=attr.validators.instance_of(str), default='')
_earthmodel = attr.ib(validator=attr.validators.instance_of(str), default='Deri')
_geometry = attr.ib(validator=attr.validators.instance_of(str), default='')
_length = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.0)
_linecode = attr.ib(validator=attr.validators.instance_of(str), default='')
_linetype = attr.ib(validator=attr.validators.instance_of(str), default='')
_phases = attr.ib(validator=attr.validators.instance_of(int), default=3)
_r0 = attr.ib(validator=attr.validators.instance_of((int, float)), default=0.01)
_r1 = attr.ib(validator=attr.validators.instance_of((int, float)), default=0.01)
_ratings = attr.ib(validator=attr.validators.instance_of(str), default='[400,]')
_rg = attr.ib(validator=attr.validators.instance_of((int, float)), default=0.01805)
_rho = attr.ib(validator=attr.validators.instance_of((int, float)), default=100)
_rmatrix = attr.ib(validator=attr.validators.instance_of(str), default='[]')
_seasons = attr.ib(validator=attr.validators.instance_of(str), default='')
_spacing = attr.ib(validator=attr.validators.instance_of(str), default='')
_switch = attr.ib(validator=attr.validators.instance_of(str), default='false')
_tscables = attr.ib(validator=attr.validators.instance_of(str), default='')
_units = attr.ib(validator=attr.validators.instance_of(str), default='m')
_wires = attr.ib(validator=attr.validators.instance_of(str), default='')
_x0 = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.0)
_x1 = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.0)
_xg = attr.ib(validator=attr.validators.instance_of((int, float)), default=0.155081)
_xmatrix = attr.ib(validator=attr.validators.instance_of(str), default='[]')
def __attrs_post_init__(self):
if self._name != '':
self._name = Utils.remove_blank_spaces(self._name)
else:
self._name = 'my_line_' + Utils.generate_random_string()
if self._bus1 == '':
self._bus1 = "my_bus1_" + Utils.generate_random_string()
else:
self._bus1 = Utils.remove_blank_spaces(self._bus1.lower())
if self._bus2 == '':
self._bus2 = "my_bus2_" + Utils.generate_random_string()
else:
self._bus2 = Utils.remove_blank_spaces(self._bus2.lower())
def to_dataframe(self):
return | pd.DataFrame.from_records([self.__dict__]) | pandas.DataFrame.from_records |
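# A self-contained sketch of the serialization pattern defined above: an attrs class
# with underscore-prefixed, validated fields whose instance __dict__ is dumped into a
# one-row DataFrame via pd.DataFrame.from_records. MiniLine is illustrative only and
# is not part of py_dss_tools.
import attr
import pandas as pd

@attr.s(kw_only=True)
class MiniLine:
    _name = attr.ib(validator=attr.validators.instance_of(str), default='')
    _bus1 = attr.ib(validator=attr.validators.instance_of(str), default='')
    _bus2 = attr.ib(validator=attr.validators.instance_of(str), default='')
    _length = attr.ib(validator=attr.validators.instance_of((int, float)), default=1.0)

    def to_dataframe(self):
        return pd.DataFrame.from_records([self.__dict__])

# attrs strips the leading underscore in the generated __init__, so fields are passed
# as plain keyword arguments:
# MiniLine(name='linha_1', bus1='bus_a', bus2='bus_b', length=0.35).to_dataframe()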
import time
from Models.GBM.LightGBM import LightGBM
from ParamTuning.Optimizer import Optimizer
from Utils.Data import Data
import pandas as pd
from Utils.Data.Data import get_dataset_xgb_batch
from Utils.Data.Features.Generated.EnsemblingFeature.LGBMEnsemblingFeature import LGBMEnsemblingFeature
from sklearn.model_selection import train_test_split
import Blending.like_params as like_params
import Blending.reply_params as reply_params
import Blending.retweet_params as retweet_params
import Blending.comment_params as comment_params
from Utils.Data.Features.Generated.EnsemblingFeature.XGBEnsembling import XGBEnsembling
import argparse
from tqdm import tqdm
from Utils.Data.Features.Generated.EnsemblingFeature.XGBFoldEnsembling import *
from Utils.Submission.Submission import create_submission_file
def prediction(LGBM, dataset_id, df, label):
tweets = Data.get_feature("raw_feature_tweet_id", dataset_id)["raw_feature_tweet_id"].array
users = Data.get_feature("raw_feature_engager_id", dataset_id)["raw_feature_engager_id"].array
# LGBM Prediction
prediction_start_time = time.time()
predictions = LGBM.get_prediction(df.to_numpy())
print(f"Prediction time: {time.time() - prediction_start_time} seconds")
# Uncomment to plot feature importance at the end of training
# LGBM.plot_fimportance()
create_submission_file(tweets, users, predictions, f"{dataset_id}_{label}_lgbm_blending_submission.csv")
def get_ensembling_label(label, dataset_id):
from Utils.Data import Data
return Data.get_feature_batch(f"tweet_feature_engagement_is_{label}",
dataset_id, total_n_split=1, split_n=0, sample=0.3)
def get_nn_prediction(label, dataset_id):
df = pd.read_csv(f'Dataset/Features/{dataset_id}/ensembling/nn_predictions_{label}.csv',
header=None, names=[0, 1, 2], usecols=[2])
df.columns = [f'nn_predictions_{label}']
return df
def params_by_label(label):
if label in ["like"]:
lgbm_params = like_params.lgbm_get_params()
xgb_params = like_params.xgb_get_params()
elif label in ["reply"]:
lgbm_params = reply_params.lgbm_get_params()
xgb_params = reply_params.xgb_get_params()
elif label in ["retweet"]:
lgbm_params = retweet_params.lgbm_get_params()
xgb_params = retweet_params.xgb_get_params()
elif label in ["comment"]:
lgbm_params = comment_params.lgbm_get_params()
xgb_params = comment_params.xgb_get_params()
else:
assert False, "What?"
return lgbm_params, xgb_params
def main():
# Instantiate the parser
parser = argparse.ArgumentParser()
parser.add_argument('label', type=str,
help='required argument: label')
args = parser.parse_args()
nn_labels = ["like", "reply", "retweet", "comment"]
LABEL = args.label
assert LABEL in ["like", "reply", "retweet", "comment"], "LABEL not valid."
print(f"label is {LABEL}")
features = ["raw_feature_creator_follower_count",
"raw_feature_creator_following_count",
"raw_feature_engager_follower_count",
"raw_feature_engager_following_count",
"raw_feature_creator_is_verified",
"raw_feature_engager_is_verified",
"raw_feature_engagement_creator_follows_engager",
"tweet_feature_number_of_photo",
"tweet_feature_number_of_video",
"tweet_feature_number_of_gif",
"tweet_feature_number_of_media",
"tweet_feature_is_retweet",
"tweet_feature_is_quote",
"tweet_feature_is_top_level",
"tweet_feature_number_of_hashtags",
"tweet_feature_creation_timestamp_hour",
"tweet_feature_creation_timestamp_week_day",
# "tweet_feature_number_of_mentions",
"tweet_feature_token_length",
"tweet_feature_token_length_unique",
"tweet_feature_text_topic_word_count_adult_content",
"tweet_feature_text_topic_word_count_kpop",
"tweet_feature_text_topic_word_count_covid",
"tweet_feature_text_topic_word_count_sport",
"number_of_engagements_with_language_like",
"number_of_engagements_with_language_retweet",
"number_of_engagements_with_language_reply",
"number_of_engagements_with_language_comment",
"number_of_engagements_with_language_negative",
"number_of_engagements_with_language_positive",
"number_of_engagements_ratio_like",
"number_of_engagements_ratio_retweet",
"number_of_engagements_ratio_reply",
"number_of_engagements_ratio_comment",
"number_of_engagements_ratio_negative",
"number_of_engagements_ratio_positive",
"number_of_engagements_between_creator_and_engager_like",
"number_of_engagements_between_creator_and_engager_retweet",
"number_of_engagements_between_creator_and_engager_reply",
"number_of_engagements_between_creator_and_engager_comment",
"number_of_engagements_between_creator_and_engager_negative",
"number_of_engagements_between_creator_and_engager_positive",
"creator_feature_number_of_like_engagements_received",
"creator_feature_number_of_retweet_engagements_received",
"creator_feature_number_of_reply_engagements_received",
"creator_feature_number_of_comment_engagements_received",
"creator_feature_number_of_negative_engagements_received",
"creator_feature_number_of_positive_engagements_received",
"creator_feature_number_of_like_engagements_given",
"creator_feature_number_of_retweet_engagements_given",
"creator_feature_number_of_reply_engagements_given",
"creator_feature_number_of_comment_engagements_given",
"creator_feature_number_of_negative_engagements_given",
"creator_feature_number_of_positive_engagements_given",
"engager_feature_number_of_like_engagements_received",
"engager_feature_number_of_retweet_engagements_received",
"engager_feature_number_of_reply_engagements_received",
"engager_feature_number_of_comment_engagements_received",
"engager_feature_number_of_negative_engagements_received",
"engager_feature_number_of_positive_engagements_received",
"number_of_engagements_like",
"number_of_engagements_retweet",
"number_of_engagements_reply",
"number_of_engagements_comment",
"number_of_engagements_negative",
"number_of_engagements_positive",
"engager_feature_number_of_previous_like_engagement",
"engager_feature_number_of_previous_reply_engagement",
"engager_feature_number_of_previous_retweet_engagement",
"engager_feature_number_of_previous_comment_engagement",
"engager_feature_number_of_previous_positive_engagement",
"engager_feature_number_of_previous_negative_engagement",
"engager_feature_number_of_previous_engagement",
"engager_feature_number_of_previous_like_engagement_ratio_1",
"engager_feature_number_of_previous_reply_engagement_ratio_1",
"engager_feature_number_of_previous_retweet_engagement_ratio_1",
"engager_feature_number_of_previous_comment_engagement_ratio_1",
"engager_feature_number_of_previous_positive_engagement_ratio_1",
"engager_feature_number_of_previous_negative_engagement_ratio_1",
"engager_feature_number_of_previous_like_engagement_ratio",
"engager_feature_number_of_previous_reply_engagement_ratio",
"engager_feature_number_of_previous_retweet_engagement_ratio",
"engager_feature_number_of_previous_comment_engagement_ratio",
"engager_feature_number_of_previous_positive_engagement_ratio",
"engager_feature_number_of_previous_negative_engagement_ratio",
"engager_feature_number_of_previous_like_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_reply_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_retweet_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_comment_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_negative_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_positive_engagement_between_creator_and_engager_by_creator",
"engager_feature_number_of_previous_like_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_reply_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_retweet_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_comment_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_negative_engagement_between_creator_and_engager_by_engager",
"engager_feature_number_of_previous_positive_engagement_between_creator_and_engager_by_engager",
# "tweet_feature_number_of_previous_like_engagements",
# "tweet_feature_number_of_previous_reply_engagements",
# "tweet_feature_number_of_previous_retweet_engagements",
# "tweet_feature_number_of_previous_comment_engagements",
# "tweet_feature_number_of_previous_positive_engagements",
# "tweet_feature_number_of_previous_negative_engagements",
"creator_feature_number_of_previous_like_engagements_given",
"creator_feature_number_of_previous_reply_engagements_given",
"creator_feature_number_of_previous_retweet_engagements_given",
"creator_feature_number_of_previous_comment_engagements_given",
"creator_feature_number_of_previous_positive_engagements_given",
"creator_feature_number_of_previous_negative_engagements_given",
"creator_feature_number_of_previous_like_engagements_received",
"creator_feature_number_of_previous_reply_engagements_received",
"creator_feature_number_of_previous_retweet_engagements_received",
"creator_feature_number_of_previous_comment_engagements_received",
"creator_feature_number_of_previous_positive_engagements_received",
"creator_feature_number_of_previous_negative_engagements_received",
"engager_feature_number_of_previous_like_engagement_with_language",
"engager_feature_number_of_previous_reply_engagement_with_language",
"engager_feature_number_of_previous_retweet_engagement_with_language",
"engager_feature_number_of_previous_comment_engagement_with_language",
"engager_feature_number_of_previous_positive_engagement_with_language",
"engager_feature_number_of_previous_negative_engagement_with_language",
"engager_feature_knows_hashtag_positive",
"engager_feature_knows_hashtag_negative",
"engager_feature_knows_hashtag_like",
"engager_feature_knows_hashtag_reply",
"engager_feature_knows_hashtag_rt",
"engager_feature_knows_hashtag_comment",
"creator_and_engager_have_same_main_language",
"is_tweet_in_creator_main_language",
"is_tweet_in_engager_main_language",
# "statistical_probability_main_language_of_engager_engage_tweet_language_1",
# "statistical_probability_main_language_of_engager_engage_tweet_language_2",
"creator_and_engager_have_same_main_grouped_language",
"is_tweet_in_creator_main_grouped_language",
"is_tweet_in_engager_main_grouped_language",
# # "hashtag_similarity_fold_ensembling_positive",
# # "link_similarity_fold_ensembling_positive",
# # "domain_similarity_fold_ensembling_positive"
"tweet_feature_creation_timestamp_hour_shifted",
"tweet_feature_creation_timestamp_day_phase",
"tweet_feature_creation_timestamp_day_phase_shifted"
]
label = [
f"tweet_feature_engagement_is_{LABEL}"
]
train_dataset = "cherry_train"
val_dataset = "cherry_val"
test_dataset = "new_test"
private_test_dataset = "last_test"
ensembling_list_dict = {
'like': ['reply', 'retweet', 'comment'],
'reply': ['reply', 'retweet', 'comment'],
'retweet': ['reply', 'retweet', 'comment'],
'comment': ['reply', 'retweet', 'comment'],
}
ensembling_list = ensembling_list_dict[LABEL]
ensembling_lgbm_params = {}
ensembling_xgb_params = {}
for ens_label in ensembling_list:
ensembling_lgbm_params[ens_label], ensembling_xgb_params[ens_label] \
= params_by_label(ens_label)
categorical_features_set = set([])
# Load train data
# loading_data_start_time = time.time()
# df_train, df_train_label = Data.get_dataset_xgb(train_dataset, features, label)
# print(f"Loading train data time: {loading_data_start_time - time.time()} seconds")
# Load val data
df_val, df_val_label = Data.get_dataset_xgb(val_dataset, features, label)
# Load test data
df_test = Data.get_dataset(features, test_dataset)
df_private = Data.get_dataset(features, private_test_dataset)
new_index = pd.Series(df_test.index).map(lambda x: x + len(df_val))
df_test.set_index(new_index, inplace=True)
new_index_private = pd.Series(df_private.index).map(lambda x: x + len(df_val) + len(df_test))
df_private.set_index(new_index_private, inplace=True)
# df to be predicted by the lgbm blending feature
df_to_predict = pd.concat([df_val, df_test, df_private])
# BLENDING FEATURE DECLARATION
feature_list = []
# NEW CODE ADDED
df_train = pd.DataFrame(columns=features)
df_train_label = pd.DataFrame(columns=label)
need_to_load_train_set = False
for ens_label in ensembling_list:
lgbm_params = ensembling_lgbm_params[ens_label]
for lgbm_param_dict in lgbm_params:
start_time = time.time()
if not LGBMEnsemblingFeature(dataset_id=private_test_dataset,
df_train=df_train,
df_train_label=get_ensembling_label(ens_label, train_dataset),
df_to_predict=df_to_predict,
param_dict=lgbm_param_dict,
categorical_features_set=categorical_features_set).has_feature():
print(f"{ens_label} {lgbm_param_dict}")
need_to_load_train_set = True
if need_to_load_train_set:
df_train, df_train_label = get_dataset_xgb_batch(total_n_split=1, split_n=0, dataset_id=train_dataset,
X_label=features, Y_label=label, sample=0.3)
for ens_label in ensembling_list:
lgbm_params = ensembling_lgbm_params[ens_label]
for lgbm_param_dict in lgbm_params:
start_time = time.time()
feature_list.append(LGBMEnsemblingFeature(dataset_id=private_test_dataset,
df_train=df_train,
df_train_label=get_ensembling_label(ens_label, train_dataset),
df_to_predict=df_to_predict,
param_dict=lgbm_param_dict,
categorical_features_set=categorical_features_set))
# NEW PART II
# ONLY THIS PART IS NEW
# LOAD THIS PART FIRST
del df_train, df_train_label
df_feature_list = [x.load_or_create() for x in tqdm(feature_list)]
for ens_label in ensembling_list:
start_time = time.time()
if ens_label == "like":
val_features_df = XGBFoldEnsemblingLike2(val_dataset).load_or_create()
test_features_df = XGBFoldEnsemblingLike2(test_dataset).load_or_create()
private_features_df = XGBFoldEnsemblingLike2(private_test_dataset).load_or_create()
elif ens_label == "retweet":
val_features_df = XGBFoldEnsemblingRetweet2(val_dataset).load_or_create()
test_features_df = XGBFoldEnsemblingRetweet2(test_dataset).load_or_create()
private_features_df = XGBFoldEnsemblingRetweet2(private_test_dataset).load_or_create()
elif ens_label == "reply":
val_features_df = XGBFoldEnsemblingReply2(val_dataset).load_or_create()
test_features_df = XGBFoldEnsemblingReply2(test_dataset).load_or_create()
private_features_df = XGBFoldEnsemblingReply2(private_test_dataset).load_or_create()
elif ens_label == "comment":
val_features_df = XGBFoldEnsemblingComment2(val_dataset).load_or_create()
test_features_df = XGBFoldEnsemblingComment2(test_dataset).load_or_create()
private_features_df = XGBFoldEnsemblingComment2(private_test_dataset).load_or_create()
else:
assert False, "oh oh something went wrong. label not found"
test_features_df.set_index(new_index, inplace=True)
private_features_df.set_index(new_index_private, inplace=True)
xgb_feature_df = pd.concat([val_features_df, test_features_df, private_features_df])
df_feature_list.append(xgb_feature_df)
print(f"time: {time.time() - start_time}")
del val_features_df, test_features_df, private_features_df
# check dimensions
len_val = len(df_val)
len_test = len(df_test)
len_private = len(df_private)
for df_feat in df_feature_list:
assert len(df_feat) == (len_val + len_test + len_private), \
f"Blending features are not of dimension expected, len val: {len_val} len test: {len_test}" \
f" len private test: {len_private}\n " \
f"obtained len: {len(df_feat)} of {df_feat.columns[0]}\n"
# split feature dataframe in validation and testing
df_feat_val_list = [df_feat.iloc[:len_val] for df_feat in df_feature_list]
df_feat_test_list = [df_feat.iloc[len_val:-len_private] for df_feat in df_feature_list]
df_feat_private_list = [df_feat.iloc[-len_private:] for df_feat in df_feature_list]
df_feat_nn_val_list = [get_nn_prediction(l, val_dataset) for l in nn_labels]
df_feat_nn_test_list = [get_nn_prediction(l, test_dataset) for l in nn_labels]
df_feat_nn_private_list = [get_nn_prediction(l, private_test_dataset) for l in nn_labels]
for df_feat_nn_test in df_feat_nn_test_list:
new_index = | pd.Series(df_feat_nn_test.index) | pandas.Series |
import pandas as pd
df = pd.DataFrame({'day': ['2021-04-09', '2021-04-10', '2021-04-11', '2021-04-12', '2021-04-13'],
'sales': [101, 145, 53, 56, 76],
'people' : [1, 2, 2, 1, 3]})
df['day'] = | pd.to_datetime(df['day']) | pandas.to_datetime |
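# A brief illustrative continuation (assumed, not part of the original snippet): once
# 'day' is a datetime64 column it can drive time-based operations such as weekday
# extraction or weekly resampling.
df['weekday'] = df['day'].dt.day_name()
weekly = df.set_index('day').resample('W')[['sales', 'people']].sum()
print(weekly)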
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
import json
from datetime import datetime
from flask import Blueprint, request
from flask_jwt_extended import jwt_required
import urllib.parse
from collections import defaultdict
from config.redisconfig import redis
from models.container_image_registry import RegistryCredential
from models.scheduler import Scheduler
from utils import constants
from utils.common import calculate_interval
from utils.es_query_utils import get_latest_secret_scan_id
from utils.helper import websocketio_channel_name_format, get_all_secret_scanned_images
from utils.response import set_response
from utils.custom_exception import InvalidUsage, InternalError
from utils.resource import filter_node_for_secret_scan
from utils.constants import TIME_UNIT_MAPPING, SECRET_SCAN_INDEX, SECRET_SCAN_LOGS_INDEX, \
ES_TERMS_AGGR_SIZE, ES_MAX_CLAUSE
from utils.esconn import ESConn, GroupByParams
from resource_models.node import Node
import pandas as pd
import re
import requests
import time
secret_api = Blueprint("secret_api", __name__)
@secret_api.route("/secret/node_report", methods=["GET", "POST"])
@jwt_required()
def secret_scanned_nodes():
number = request.args.get("number")
time_unit = request.args.get("time_unit")
if number:
try:
number = int(number)
except ValueError:
raise InvalidUsage("Number should be an integer value.")
if bool(number is not None) ^ bool(time_unit):
raise InvalidUsage("Require both number and time_unit or ignore both of them.")
if time_unit and time_unit not in TIME_UNIT_MAPPING.keys():
raise InvalidUsage("time_unit should be one of these, month/day/hour/minute")
filters = {}
node_filters = {}
page_size = 10
start_index = 0
if request.is_json:
if type(request.json) != dict:
raise InvalidUsage("Request data invalid")
req_filters = request.json.get("filters", {})
node_ids = []
node_ids.extend(req_filters.get("image_name_with_tag", []))
node_ids.extend(req_filters.get("host_name", []))
filters["node_id"] = node_ids
if len(req_filters.get("container_name", [])) > 0:
filters["container_name"] = req_filters.get("container_name", [])
if len(req_filters.get("kubernetes_cluster_name", [])) > 0:
filters["kubernetes_cluster_name"] = req_filters.get("kubernetes_cluster_name", [])
node_filters = request.json.get("node_filters", {})
page_size = request.json.get("size", page_size)
start_index = request.json.get("start_index", start_index)
if node_filters:
tmp_filters = filter_node_for_secret_scan(node_filters)
if tmp_filters:
filters = {**filters, **tmp_filters}
lucene_query_string = request.args.get("lucene_query")
if lucene_query_string:
lucene_query_string = urllib.parse.unquote(lucene_query_string)
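# Aggregate secret-scan documents per node_id and scan_id, collecting
# severity counts, the latest timestamp, node/container names and node type.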
aggs = {
"node_id": {
"terms": {
"field": "node_id.keyword",
"size": ES_TERMS_AGGR_SIZE
},
"aggs": {
"scan_id": {
"terms": {
"field": "scan_id.keyword",
"size": ES_TERMS_AGGR_SIZE
},
"aggs": {
"severity": {
"terms": {
"field": "Severity.level.keyword",
"size": 25
}
},
"scan_recent_timestamp": {
"max": {
"field": "@timestamp"
}
},
"node_name": {
"terms": {
"field": "node_name.keyword"
}
},
"container_name": {
"terms": {
"field": "container_name.keyword"
}
}
}
},
"node_type": {
"terms": {
"field": "node_type.keyword",
"size": 1
},
}
}
}
}
aggs_response = ESConn.aggregation_helper(
SECRET_SCAN_INDEX,
filters,
aggs,
number,
TIME_UNIT_MAPPING.get(time_unit),
lucene_query_string
)
response = []
active_containers = defaultdict(int)
containers_topology_data = redis.get(
websocketio_channel_name_format("{0}?format=deepfence".format(constants.NODE_TYPE_CONTAINER))[1])
if containers_topology_data:
containers_topology_data = json.loads(containers_topology_data)
for df_id, container in containers_topology_data.items():
if container.get("image_name"):
if container.get("docker_container_state") != "running":
continue
active_containers[
"{0}:{1}".format(container.get("image_name", ""), container.get("image_tag", ""))] += 1
scan_aggs = {
"node_id": {
"terms": {
"field": "node_id.keyword",
"size": ES_TERMS_AGGR_SIZE
},
"aggs": {
"scan_status": {
"terms": {
"field": "scan_status.keyword",
"size": ES_TERMS_AGGR_SIZE
}
}
}
}
}
scan_aggs_response = ESConn.aggregation_helper(
SECRET_SCAN_LOGS_INDEX,
{},
scan_aggs
)
status_map = {}
for node in scan_aggs_response["aggregations"]["node_id"]["buckets"]:
status_map[node["key"]] = {"error_count": 0, "total_count": 0}
for status_bucket in node["scan_status"]["buckets"]:
if status_bucket["key"] == constants.SECRET_SCAN_STATUS_COMPLETED or \
status_bucket["key"] == constants.CVE_SCAN_STATUS_ERROR:
status_map[node["key"]]["total_count"] += status_bucket["doc_count"]
if status_bucket["key"] == constants.CVE_SCAN_STATUS_ERROR:
status_map[node["key"]]["error_count"] += status_bucket["doc_count"]
if "aggregations" in aggs_response:
for node_id_aggr in aggs_response["aggregations"]["node_id"]["buckets"]:
node_type = ""
if node_id_aggr["node_type"]["buckets"]:
node_type = node_id_aggr["node_type"]["buckets"][0]["key"]
scan_list = []
for scan_id_aggr in node_id_aggr["scan_id"]["buckets"]:
scan_details = {
"time_stamp": scan_id_aggr["scan_recent_timestamp"]["value_as_string"], "severity": {},
"node_name": "", "node_id": node_id_aggr["key"], "scan_id": scan_id_aggr["key"],
"node_type": node_type, "scan_status": constants.SECRET_SCAN_STATUS_COMPLETED}
if scan_id_aggr["node_name"]["buckets"]:
scan_details["node_name"] = node_id_aggr["key"].split(";")[0]
if scan_id_aggr["container_name"]["buckets"] and scan_id_aggr["container_name"]["buckets"][0]["key"] != "":
scan_details["container_name"] = scan_id_aggr["container_name"]["buckets"][0]["key"]
scan_details["node_name"] = scan_details["container_name"]
for status_aggr in scan_id_aggr["severity"]["buckets"]:
scan_details["severity"][status_aggr["key"]] = status_aggr["doc_count"]
scan_details["total"] = 0
for severity in scan_details["severity"]:
scan_details["total"] += scan_details["severity"][severity]
scan_details["active_containers"] = active_containers.get(scan_details["node_name"].split(";")[0], 0)
scan_list.append(scan_details)
scan_list = sorted(scan_list, key=lambda k: k["time_stamp"], reverse=True)
if not scan_list:
continue
node_data = {
"node_name": scan_list[0].get("container_name", "") if scan_list[0]["node_type"] == constants.NODE_TYPE_CONTAINER else scan_list[0]["node_name"],
"node_id": scan_list[0]["node_id"],
"node_type": scan_list[0]["node_type"],
"scans": scan_list,
"time_stamp": scan_list[0]["time_stamp"],
}
node_data["total_count"] = status_map.get(node_data["node_id"], {}).get("total_count", 0)
node_data["error_count"] = status_map.get(node_data["node_id"], {}).get("error_count", 0)
response.append(node_data)
response = sorted(response, key=lambda k: k["time_stamp"], reverse=True)
return set_response(data={"data": response[start_index:(start_index + page_size)], "total": len(response)})
@secret_api.route("/secret/scan_results", methods=["POST"],
endpoint="api_v1_5_secret_scan_results")
@jwt_required()
def secret_scan_results():
"""
Secret API - Get/Delete Secret Scan Results with filters
---
tags:
- Secret
security:
- Bearer: []
operationId: findSecretScanResults
description: Get/Delete secret scan results with filters for node_id
parameters:
- in: body
name: Options
description: Options to get or delete secret scan results
schema:
type: object
properties:
action:
type: string
enum: [get, delete]
default: get
description: Action to perform - `get` or `delete`
size:
type: integer
example: 10
default: 10
minimum: 1
maximum: 10000
description: The numbers of scan results to return
start_index:
type: integer
example: 0
minimum: 0
maximum: 9999
default: 0
description: The number of items to skip before starting to collect the result set
filters:
description: Filter secret scan results by various fields (key value pairs)
type: object
properties:
node_id:
type: array
uniqueItems: true
description: Node ID (refer enumerate api)
example: ["wekgfewgj"]
items:
type: string
scan_id:
type: array
uniqueItems: true
description: Scan ID
example: ["wekgfewgj"]
items:
type: string
status:
type: array
uniqueItems: true
description: Test status
example: ["pass", "fail"]
items:
type: string
host_name:
type: array
uniqueItems: true
description: Host names
example: ["dev-1", "dev-2"]
items:
type: string
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
properties:
message:
type: string
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
if not request.is_json:
raise InvalidUsage("Missing JSON in request")
req_json = request.json
action = req_json.get("action", "get")
filters = req_json.get("filters", {})
lucene_query = request.args.get("lucene_query", None)
if not filters:
filters = {}
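# Translate UI node_ids in the filter into the scope_ids stored in the
# scan documents.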
if "node_id" in filters:
scope_ids = []
for node_id in filters["node_id"]:
node = Node(node_id)
scope_ids.append(node.scope_id)
filters["node_id"] = scope_ids
if action == "get":
es_resp = ESConn.search_by_and_clause(
SECRET_SCAN_INDEX, filters, req_json.get("start_index", 0),
req_json.get("sort_order", "desc"), size=req_json.get("size", 10),
scripted_sort=[{"Severity.score": {"order": "desc", "unmapped_type": "double"}}],
lucene_query_string=lucene_query)
return set_response(data={"rows": es_resp["hits"], "total": es_resp.get("total", {}).get("value", 0)})
elif action == "delete":
es_resp = ESConn.search_by_and_clause(
SECRET_SCAN_INDEX, filters, req_json.get("start_index", 0),
req_json.get("sort_order", "desc"), size=req_json.get("size", 10), _source=["_id"])
# no_of_docs_to_be_masked = mask_scan_results(es_resp["hits"])
# no_of_docs_to_be_masked would be the count reported in the message below
return set_response(data={"message": "deleted {0} scan results".format(0)})
else:
raise InvalidUsage("Unsupported action: {0}".format(action))
@secret_api.route("/secret/scan_registry", methods=["POST"])
@jwt_required()
def secret_scan_registry():
post_data = request.json
registry_images = post_data.get("registry_images", {})
action = post_data.get("", constants.NODE_ACTION_SECRET_SCAN_START)
cron_expr = post_data.get("action_args", {}).get("cron", "")
if type(registry_images) != dict:
raise InvalidUsage("registry_images is required for scanning registry_image")
if not registry_images.get("registry_id") or type(registry_images["registry_id"]) != int:
raise InvalidUsage("registry_id is required in registry_images key")
if registry_images.get("image_name_with_tag_list") and type(
registry_images["image_name_with_tag_list"]) != list:
raise InvalidUsage("image_name_with_tag_list must be a list")
for img in registry_images.get("image_name_with_tag_list", []):
if not img:
raise InvalidUsage("image_name_with_tag_list must not have empty values")
try:
registry_credential = RegistryCredential.query.get(
registry_images["registry_id"])
except Exception as ex:
raise InternalError("Failed to get registry credential {}".format(registry_images["registry_id"]))
datetime_now = datetime.now()
scan_id_list = []
image_list_details_str = redis.get("{0}:{1}".format(constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,
registry_images["registry_id"]))
if action == constants.NODE_ACTION_SCHEDULE_SECRET_SCAN:
try:
node_action_details = {"action": action, "registry_images": registry_images,
"action_args": post_data.get("action_args", {})}
check_existing = Scheduler.query.filter_by(action=action, nodes=node_action_details).all()
if check_existing:
raise InvalidUsage("A similar schedule already exists")
scheduled_action = Scheduler(
action=action, description=str(node_action_details["action_args"].get("description", "")),
cron_expr=cron_expr, nodes=node_action_details, is_enabled=True, node_names=repr(node_action_details),
status="")
scheduled_action.save()
except Exception as exc:
return set_response(error="Could not save scheduled task: {}".format(exc), status=400)
if registry_images.get("all_registry_images", False):
image_dict = json.loads(image_list_details_str)
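# Sort the cached image list by push time (newest first) and keep a
# single entry per unique image tag.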
image_df = pd.DataFrame(image_dict['image_list'])
image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)
sorted_df = image_df.sort_values(by=['timestamp'], ascending=False)
df_unique_list = sorted_df["image_tag"].unique()
df_unique = pd.DataFrame(data=df_unique_list, columns=["image_tag"])
import functools
import itertools
import warnings
from collections import OrderedDict
import cupy
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal
import cudf
import cudf._lib as libcudf
from cudf._lib.nvtx import annotate
from cudf._lib.scalar import Scalar
from cudf.core import column
from cudf.core.column import as_column, build_categorical_column
from cudf.utils.dtypes import (
is_categorical_dtype,
is_datetime_dtype,
is_numerical_dtype,
is_scalar,
is_string_dtype,
min_scalar_type,
)
class Frame(libcudf.table.Table):
"""
Frame: A collection of Column objects with an optional index.
Parameters
----------
data : OrderedColumnDict
An OrderedColumnDict mapping column names to Columns
index : Table
A Frame representing the (optional) index columns.
"""
@classmethod
def _from_table(cls, table):
return cls(table._data, index=table._index)
@classmethod
@annotate("CONCAT", color="orange", domain="cudf_python")
def _concat(cls, objs, axis=0, ignore_index=False):
# shallow-copy the input DFs in case the same DF instance
# is concatenated with itself
objs = [f.copy(deep=False) for f in objs]
from cudf.core.index import as_index
from cudf.core.column.column import column_empty
from cudf.core.column.column import build_categorical_column
# Create a dictionary of the common, non-null columns
def get_non_null_cols_and_dtypes(col_idxs, list_of_columns):
# A mapping of {idx: np.dtype}
dtypes = dict()
# A mapping of {idx: [...columns]}, where `[...columns]`
# is a list of columns with at least one valid value for each
# column name across all input dataframes
non_null_columns = dict()
for idx in col_idxs:
for cols in list_of_columns:
# Skip columns not in this frame
if idx >= len(cols) or cols[idx] is None:
continue
# Store the first dtype we find for a column, even if it's
# all-null. This ensures we always have at least one dtype
# for each name. This dtype will be overwritten later if a
# non-null Column with the same name is found.
if idx not in dtypes:
dtypes[idx] = cols[idx].dtype
if cols[idx].valid_count > 0:
if idx not in non_null_columns:
non_null_columns[idx] = [cols[idx]]
else:
non_null_columns[idx].append(cols[idx])
return non_null_columns, dtypes
def find_common_dtypes_and_categories(non_null_columns, dtypes):
# A mapping of {idx: categories}, where `categories` is a
# column of all the unique categorical values from each
# categorical column across all input dataframes
categories = dict()
for idx, cols in non_null_columns.items():
# default to the first non-null dtype
dtypes[idx] = cols[0].dtype
# If all the non-null dtypes are int/float, find a common dtype
if all(is_numerical_dtype(col.dtype) for col in cols):
dtypes[idx] = np.find_common_type(
[col.dtype for col in cols], []
)
# If all categorical dtypes, combine the categories
elif all(is_categorical_dtype(col.dtype) for col in cols):
# Combine and de-dupe the categories
categories[idx] = (
cudf.concat([col.cat().categories for col in cols])
.to_series()
.drop_duplicates()
._column
)
# Set the column dtype to the codes' dtype. The categories
# will be re-assigned at the end
dtypes[idx] = min_scalar_type(len(categories[idx]))
# Otherwise raise an error if columns have different dtypes
elif not all(
is_dtype_equal(c.dtype, dtypes[idx]) for c in cols
):
raise ValueError("All columns must be the same type")
return categories
def cast_cols_to_common_dtypes(
col_idxs, list_of_columns, dtypes, categories
):
# Cast all columns to a common dtype, assign combined categories,
# and back-fill missing columns with all-null columns
for idx in col_idxs:
dtype = dtypes[idx]
for cols in list_of_columns:
# If column not in this df, fill with an all-null column
if idx >= len(cols) or cols[idx] is None:
n = len(next(filter(lambda x: x is not None, cols)))
cols[idx] = column_empty(n, dtype, masked=True)
else:
# If column is categorical, rebase the codes with the
# combined categories, and cast the new codes to the
# min-scalar-sized dtype
if idx in categories:
cols[idx] = (
cols[idx]
.cat()
._set_categories(
categories[idx], is_unique=True
)
.codes
)
cols[idx] = cols[idx].astype(dtype)
def reassign_categories(categories, cols, col_idxs):
for name, idx in zip(cols, col_idxs):
if idx in categories:
cols[name] = build_categorical_column(
categories=categories[idx],
codes=as_column(
cols[name].base_data, dtype=cols[name].dtype
),
mask=cols[name].base_mask,
offset=cols[name].offset,
size=cols[name].size,
)
# Get a list of the unique table column names
names = [name for f in objs for name in f._column_names]
names = list(OrderedDict.fromkeys(names).keys())
# Combine the index and table columns for each Frame into a
# list of [...index_cols, ...table_cols]. If a table is
# missing a column, that list will have None in the slot instead
columns = [
([] if ignore_index else list(f._index._data.columns))
+ [f._data[name] if name in f._data else None for name in names]
for i, f in enumerate(objs)
]
# Get a list of the combined index and table column indices
indices = list(range(functools.reduce(max, map(len, columns))))
# The position of the first table colum in each
# combined index + table columns list
first_data_column_position = len(indices) - len(names)
# Get the non-null columns and their dtypes
non_null_cols, dtypes = get_non_null_cols_and_dtypes(indices, columns)
# Infer common dtypes between numeric columns
# and combine CategoricalColumn categories
categories = find_common_dtypes_and_categories(non_null_cols, dtypes)
# Cast all columns to a common dtype, assign combined categories,
# and back-fill missing columns with all-null columns
cast_cols_to_common_dtypes(indices, columns, dtypes, categories)
# Construct input tables with the index and data columns in the same
# order. This strips the given index/column names and replaces the
# names with their integer positions in the `cols` list
tables = []
for i, cols in enumerate(columns):
table_cols = cols[first_data_column_position:]
table_names = indices[first_data_column_position:]
table = cls(data=dict(zip(table_names, table_cols)))
if 1 == first_data_column_position:
table._index = as_index(cols[0])
elif first_data_column_position > 1:
index_cols = cols[:first_data_column_position]
index_names = indices[:first_data_column_position]
table._index = cls(data=dict(zip(index_names, index_cols)))
tables.append(table)
# Concatenate the Tables
out = cls._from_table(
libcudf.concat.concat_tables(tables, ignore_index=ignore_index)
)
# Reassign the categories for any categorical table cols
reassign_categories(
categories, out._data, indices[first_data_column_position:]
)
# Reassign the categories for any categorical index cols
reassign_categories(
categories, out._index._data, indices[:first_data_column_position]
)
# Reassign index and column names
if isinstance(objs[0].columns, pd.MultiIndex):
out.columns = objs[0].columns
else:
out.columns = names
out._index.name = objs[0]._index.name
out._index.names = objs[0]._index.names
return out
def _get_columns_by_label(self, labels, downcast=False):
"""
Returns columns of the Frame specified by `labels`
If downcast is True, try and downcast from a DataFrame to a Series
"""
new_data = self._data.get_by_label(labels)
if downcast:
if is_scalar(labels):
nlevels = 1
elif isinstance(labels, tuple):
nlevels = len(labels)
if self._data.multiindex is False or nlevels == self._data.nlevels:
return self._constructor_sliced(
new_data, name=labels, index=self.index
)
return self._constructor(
new_data, columns=new_data.to_pandas_index(), index=self.index
)
def _get_columns_by_index(self, indices):
"""
Returns columns of the Frame specified by `labels`
"""
data = self._data.get_by_index(indices)
return self._constructor(
data, columns=data.to_pandas_index(), index=self.index
)
def _gather(self, gather_map, keep_index=True):
if not pd.api.types.is_integer_dtype(gather_map.dtype):
gather_map = gather_map.astype("int32")
result = self.__class__._from_table(
libcudf.copying.gather(
self, as_column(gather_map), keep_index=keep_index
)
)
result._copy_categories(self)
return result
def _hash(self, initial_hash_values=None):
return libcudf.hash.hash(self, initial_hash_values)
def _hash_partition(
self, columns_to_hash, num_partitions, keep_index=True
):
output, offsets = libcudf.hash.hash_partition(
self, columns_to_hash, num_partitions, keep_index
)
output = self.__class__._from_table(output)
output._copy_categories(self, include_index=keep_index)
return output, offsets
def _as_column(self):
"""
_as_column : Converts a single columned Frame to Column
"""
assert (
self._num_columns == 1
and self._index is None
and self._column_names[0] is None
), """There should be only one data column,
no index and None as the name to use this method"""
return self._data[None].copy(deep=False)
def _scatter(self, key, value):
result = self._from_table(libcudf.copying.scatter(value, key, self))
result._copy_categories(self)
return result
def _empty_like(self, keep_index=True):
result = self._from_table(
libcudf.copying.table_empty_like(self, keep_index)
)
result._copy_categories(self, include_index=keep_index)
return result
def _slice(self, arg):
"""
_slice : slice the frame as per the arg
Parameters
----------
arg : should always be of type slice and doesn't handle step
"""
from cudf.core.index import RangeIndex
num_rows = len(self)
if num_rows == 0:
return self
start, stop, stride = arg.indices(num_rows)
# This is just to handle RangeIndex type, stop
# it from materializing unnecessarily
keep_index = True
if self.index is not None and isinstance(self.index, RangeIndex):
keep_index = False
if start < 0:
start = start + num_rows
if stop < 0:
stop = stop + num_rows
if start > stop and (stride is None or stride == 1):
return self._empty_like(keep_index)
else:
start = len(self) if start > num_rows else start
stop = len(self) if stop > num_rows else stop
if stride is not None and stride != 1:
return self._gather(
cupy.arange(start, stop=stop, step=stride, dtype=np.int32)
)
else:
result = self._from_table(
libcudf.copying.table_slice(
self, [start, stop], keep_index
)[0]
)
result._copy_categories(self, include_index=keep_index)
# Adding index of type RangeIndex back to
# result
if keep_index is False and self.index is not None:
result.index = self.index[start:stop]
result.columns = self.columns
return result
def _normalize_scalars(self, other):
"""
Try to normalizes scalar values as per self dtype
"""
if (
other is not None
and (isinstance(other, float) and not np.isnan(other))
) and (self.dtype.type(other) != other):
raise TypeError(
"Cannot safely cast non-equivalent {} to {}".format(
type(other).__name__, self.dtype.name
)
)
return (
self.dtype.type(other)
if (
other is not None
and (isinstance(other, float) and not np.isnan(other))
)
else other
)
def _normalize_columns_and_scalars_type(self, other):
"""
Try to normalize the other's dtypes as per self.
Parameters
----------
self : Can be a DataFrame or Series or Index
other : Can be a DataFrame, Series, Index, Array
like object or a scalar value
if self is DataFrame, other can be only a
scalar or array like with size of number of columns
in DataFrame or a DataFrame with the same dimension
if self is Series, other can be only a scalar or
a series like with same length as self
Returns
-------
A dataframe/series/list/scalar form of normalized other
"""
if isinstance(self, cudf.DataFrame) and isinstance(
other, cudf.DataFrame
):
return [
other[self_col].astype(self._data[self_col].dtype)._column
for self_col in self._data.names
]
elif isinstance(self, (cudf.Series, cudf.Index)) and not is_scalar(
other
):
other = as_column(other)
return other.astype(self.dtype)
else:
# Handles scalar or list/array like scalars
if isinstance(self, (cudf.Series, cudf.Index)) and is_scalar(
other
):
return self._normalize_scalars(other)
elif isinstance(self, cudf.DataFrame):
out = []
if is_scalar(other):
other = [other for i in range(len(self._data.names))]
out = [
self[in_col_name]._normalize_scalars(sclr)
for in_col_name, sclr in zip(self._data.names, other)
]
return out
else:
raise ValueError(
"Inappropriate input {} and other {} combination".format(
type(self), type(other)
)
)
def where(self, cond, other=None, inplace=False):
"""
Replace values where the condition is False.
Parameters
----------
cond : bool Series/DataFrame, array-like
Where cond is True, keep the original value.
Where False, replace with corresponding value from other.
Callables are not supported.
other: scalar, list of scalars, Series/DataFrame
Entries where cond is False are replaced with
corresponding value from other. Callables are not
supported. Default is None.
DataFrame expects only Scalar or array like with scalars or
dataframe with the same dimension as self.
Series expects only scalar or series like with same length
inplace : bool, default False
Whether to perform the operation in place on the data.
Returns
-------
Same type as caller
Examples:
---------
>>> import cudf
>>> df = cudf.DataFrame({"A":[1, 4, 5], "B":[3, 5, 8]})
>>> df.where(df % 2 == 0, [-1, -1])
A B
0 -1 -1
1 4 -1
2 -1 8
>>> ser = cudf.Series([4, 3, 2, 1, 0])
>>> ser.where(ser > 2, 10)
0 4
1 3
2 10
3 10
4 10
dtype: int64
>>> ser.where(ser > 2)
0 4
1 3
2 null
3 null
4 null
dtype: int64
"""
if isinstance(self, cudf.DataFrame):
if hasattr(cond, "__cuda_array_interface__"):
cond = self.from_gpu_matrix(
cond, columns=self._data.names, index=self.index
)
elif not isinstance(cond, cudf.DataFrame):
cond = self.from_pandas(pd.DataFrame(cond))
common_cols = set(self._data.names).intersection(
set(cond._data.names)
)
if len(common_cols) > 0:
# If `self` and `cond` are having unequal index,
# then re-index `cond`.
if len(self.index) != len(cond.index) or any(
self.index != cond.index
):
cond = cond.reindex(self.index)
else:
if cond.shape != self.shape:
raise ValueError(
"""Array conditional must be same shape as self"""
)
# Setting `self` column names to `cond`
# as `cond` has no column names.
cond.columns = self.columns
other = self._normalize_columns_and_scalars_type(other)
out_df = cudf.DataFrame(index=self.index)
if len(self._columns) != len(other):
raise ValueError(
"""Replacement list length or number of dataframe columns
should be equal to Number of columns of dataframe"""
)
for column_name, other_column in zip(self._data.names, other):
input_col = self._data[column_name]
if column_name in cond._data:
if is_categorical_dtype(input_col.dtype):
if np.isscalar(other_column):
try:
other_column = input_col._encode(other_column)
except ValueError:
# When other is not present in categories,
# fill with Null.
other_column = None
elif hasattr(other_column, "codes"):
other_column = other_column.codes
input_col = input_col.codes
result = libcudf.copying.copy_if_else(
input_col, other_column, cond._data[column_name]
)
if is_categorical_dtype(self._data[column_name].dtype):
result = build_categorical_column(
categories=self._data[column_name].categories,
codes=as_column(
result.base_data, dtype=result.dtype
),
mask=result.base_mask,
size=result.size,
offset=result.offset,
ordered=self._data[column_name].ordered,
)
else:
from cudf._lib.null_mask import MaskState, create_null_mask
out_mask = create_null_mask(
len(input_col), state=MaskState.ALL_NULL
)
result = input_col.set_mask(out_mask)
out_df[column_name] = self[column_name].__class__(result)
return self._mimic_inplace(out_df, inplace=inplace)
else:
if isinstance(other, cudf.DataFrame):
raise NotImplementedError(
"cannot align with a higher dimensional Frame"
)
other = self._normalize_columns_and_scalars_type(other)
cond = as_column(cond)
if len(cond) != len(self):
raise ValueError(
"""Array conditional must be same shape as self"""
)
input_col = self._data[self.name]
if is_categorical_dtype(input_col.dtype):
if np.isscalar(other):
try:
other = input_col._encode(other)
except ValueError:
# When other is not present in categories,
# fill with Null.
other = None
elif hasattr(other, "codes"):
other = other.codes
input_col = input_col.codes
result = libcudf.copying.copy_if_else(input_col, other, cond)
if is_categorical_dtype(self.dtype):
result = build_categorical_column(
categories=self._data[self.name].categories,
codes=as_column(result.base_data, dtype=result.dtype),
mask=result.base_mask,
size=result.size,
offset=result.offset,
ordered=self._data[self.name].ordered,
)
if isinstance(self, cudf.Index):
from cudf.core.index import as_index
result = as_index(result, name=self.name)
else:
result = self._copy_construct(data=result)
return self._mimic_inplace(result, inplace=inplace)
def mask(self, cond, other=None, inplace=False):
"""
Replace values where the condition is True.
Parameters
----------
cond : bool Series/DataFrame, array-like
Where cond is False, keep the original value.
Where True, replace with corresponding value from other.
Callables are not supported.
other: scalar, list of scalars, Series/DataFrame
Entries where cond is True are replaced with
corresponding value from other. Callables are not
supported. Default is None.
DataFrame expects only Scalar or array like with scalars or
dataframe with the same dimension as self.
Series expects only scalar or series like with same length
inplace : bool, default False
Whether to perform the operation in place on the data.
Returns
-------
Same type as caller
Examples:
---------
>>> import cudf
>>> df = cudf.DataFrame({"A":[1, 4, 5], "B":[3, 5, 8]})
>>> df.mask(df % 2 == 0, [-1, -1])
A B
0 1 3
1 -1 5
2 5 -1
>>> ser = cudf.Series([4, 3, 2, 1, 0])
>>> ser.mask(ser > 2, 10)
0 10
1 10
2 2
3 1
4 0
dtype: int64
>>> ser.mask(ser > 2)
0 null
1 null
2 2
3 1
4 0
dtype: int64
"""
if not hasattr(cond, "__invert__"):
# We Invert `cond` below and call `where`, so
# making sure the object supports
# `~`(inversion) operator or `__invert__` method
cond = cupy.asarray(cond)
return self.where(cond=~cond, other=other, inplace=inplace)
def _partition(self, scatter_map, npartitions, keep_index=True):
output_table, output_offsets = libcudf.partitioning.partition(
self, scatter_map, npartitions, keep_index
)
# due to the split limitation mentioned
# here: https://github.com/rapidsai/cudf/issues/4607
# we need to remove first & last elements in offsets.
# TODO: Remove this after the above issue is fixed.
output_offsets = output_offsets[1:-1]
result = libcudf.copying.table_split(
output_table, output_offsets, keep_index=keep_index
)
result = [self.__class__._from_table(tbl) for tbl in result]
for frame in result:
frame._copy_categories(self, include_index=keep_index)
if npartitions:
for i in range(npartitions - len(result)):
result.append(self._empty_like(keep_index))
return result
def dropna(self, axis=0, how="any", subset=None, thresh=None):
"""
Drops rows (or columns) containing nulls from a Column.
Parameters
----------
axis : {0, 1}, optional
Whether to drop rows (axis=0, default) or columns (axis=1)
containing nulls.
how : {"any", "all"}, optional
Specifies how to decide whether to drop a row (or column).
any (default) drops rows (or columns) containing at least
one null value. all drops only rows (or columns) containing
*all* null values.
subset : list, optional
List of columns to consider when dropping rows (all columns
are considered by default). Alternatively, when dropping
columns, subset is a list of rows to consider.
thresh: int, optional
If specified, then drops every row (or column) containing
less than `thresh` non-null values
Returns
-------
Copy of the DataFrame with rows/columns containing nulls dropped.
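Examples:
---------
A minimal illustrative sketch (output is indicative; exact null
rendering depends on the cudf version):
>>> import cudf
>>> df = cudf.DataFrame({"A": [1, None, 3], "B": [4, 5, None]})
>>> df.dropna()   # row 0 is the only row with no nulls
A B
0 1 4
With how="all", a row is dropped only when every value in it is null,
so no row of `df` would be dropped here.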
"""
if axis == 0:
return self._drop_na_rows(how=how, subset=subset, thresh=thresh)
else:
return self._drop_na_columns(how=how, subset=subset, thresh=thresh)
def _drop_na_rows(self, how="any", subset=None, thresh=None):
"""
Drops null rows from `self`.
how : {"any", "all"}, optional
Specifies how to decide whether to drop a row.
any (default) drops rows containing at least
one null value. all drops only rows containing
*all* null values.
subset : list, optional
List of columns to consider when dropping rows.
thresh: int, optional
If specified, then drops every row containing
less than `thresh` non-null values.
"""
if subset is None:
subset = self._column_names
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self._data.names
):
subset = (subset,)
diff = set(subset) - set(self._data)
if len(diff) != 0:
raise KeyError("columns {!r} do not exist".format(diff))
subset_cols = [
name for name, col in self._data.items() if name in subset
]
if len(subset_cols) == 0:
return self.copy(deep=True)
result = self.__class__._from_table(
libcudf.stream_compaction.drop_nulls(
self, how=how, keys=subset, thresh=thresh
)
)
result._copy_categories(self)
return result
def _drop_na_columns(self, how="any", subset=None, thresh=None):
"""
Drop columns containing nulls
"""
out_cols = []
if subset is None:
df = self
else:
df = self.take(subset)
if thresh is None:
if how == "all":
thresh = 1
else:
thresh = len(df)
for col in self._data.names:
if (len(df[col]) - df[col].null_count) < thresh:
continue
out_cols.append(col)
return self[out_cols]
def _apply_boolean_mask(self, boolean_mask):
"""
Applies boolean mask to each row of `self`,
rows corresponding to `False` is dropped
"""
result = self.__class__._from_table(
libcudf.stream_compaction.apply_boolean_mask(
self, as_column(boolean_mask)
)
)
result._copy_categories(self)
return result
def _quantiles(
self,
q,
interpolation="LINEAR",
is_sorted=False,
column_order=(),
null_precedence=(),
):
interpolation = libcudf.types.Interpolation[interpolation]
is_sorted = libcudf.types.Sorted["YES" if is_sorted else "NO"]
column_order = [libcudf.types.Order[key] for key in column_order]
null_precedence = [
libcudf.types.NullOrder[key] for key in null_precedence
]
result = self.__class__._from_table(
libcudf.quantiles.quantiles(
self,
q,
interpolation,
is_sorted,
column_order,
null_precedence,
)
)
result._copy_categories(self)
return result
def rank(
self,
axis=0,
method="average",
numeric_only=None,
na_option="keep",
ascending=True,
pct=False,
):
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value
(i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
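Examples:
---------
A minimal illustrative sketch (output is indicative):
>>> import cudf
>>> s = cudf.Series([10, 20, 20, 30])
>>> s.rank()   # tied values share the average of their ranks
0 1.0
1 2.5
2 2.5
3 4.0
dtype: float64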
"""
if method not in {"average", "min", "max", "first", "dense"}:
raise KeyError(method)
method_enum = libcudf.sort.RankMethod[method.upper()]
if na_option not in {"keep", "top", "bottom"}:
raise KeyError(na_option)
# TODO code for selecting numeric columns
source = self
if numeric_only:
warnings.warn("numeric_only=True is not implemented yet")
out_rank_table = libcudf.sort.rank_columns(
source, method_enum, na_option, ascending, pct
)
return self._from_table(out_rank_table).astype(np.float64)
def repeat(self, repeats, axis=None):
"""Repeats elements consecutively
Parameters
----------
repeats : int, array, numpy array, or Column
the number of times to repeat each element
Example
-------
>>> import cudf as cudf
>>> s = cudf.Series([0, 2]) # or DataFrame
>>> s
0 0
1 2
dtype: int64
>>> s.repeat([3, 4])
0 0
0 0
0 0
1 2
1 2
1 2
1 2
dtype: int64
>>> s.repeat(2)
0 0
0 0
1 2
1 2
dtype: int64
>>>
"""
if axis is not None:
raise NotImplementedError(
"Only axis=`None` supported at this time."
)
return self._repeat(repeats)
def _repeat(self, count):
if is_scalar(count):
count = Scalar(count)
else:
count = as_column(count)
result = self.__class__._from_table(
libcudf.filling.repeat(self, count)
)
result._copy_categories(self)
return result
def _fill(self, fill_values, begin, end, inplace):
col_and_fill = zip(self._columns, fill_values)
if not inplace:
data_columns = (c._fill(v, begin, end) for (c, v) in col_and_fill)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
for (c, v) in col_and_fill:
c.fill(v, begin, end, inplace=True)
return self
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""Shift values by `periods` positions.
"""
assert axis in (None, 0) and freq is None
return self._shift(periods)
def _shift(self, offset, fill_value=None):
data_columns = (col.shift(offset, fill_value) for col in self._columns)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
def drop_duplicates(self, subset=None, keep="first", nulls_are_equal=True):
"""
Drop duplicate rows from the frame, considering only the
columns in `subset`.
subset : list, optional
List of columns to consider when dropping rows.
keep : {"first", "last", False}, default "first"
"first" keeps the first occurrence of each duplicate,
"last" keeps the last occurrence, and False drops all
duplicates.
nulls_are_equal : bool, default True
Whether null elements are considered equal to other null
elements.
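Example
-------
Illustrative sketch only (display formatting may differ):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 1, 2], 'b': [3, 3, 4]})
>>> df.drop_duplicates()
# keeps the first of the two identical rows plus the unique row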
"""
if subset is None:
subset = self._column_names
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self._data.names
):
subset = (subset,)
diff = set(subset) - set(self._data)
if len(diff) != 0:
raise KeyError("columns {!r} do not exist".format(diff))
subset_cols = [name for name in self._column_names if name in subset]
if len(subset_cols) == 0:
return self.copy(deep=True)
result = self._from_table(
libcudf.stream_compaction.drop_duplicates(
self, keys=subset, keep=keep, nulls_are_equal=nulls_are_equal
)
)
result._copy_categories(self)
return result
def replace(self, to_replace, replacement):
copy_data = self._data.copy()
for name, col in copy_data.items():
if not (to_replace is None and replacement is None):
try:
(
col_all_nan,
col_replacement,
col_to_replace,
) = _get_replacement_values(
to_replace=to_replace,
replacement=replacement,
col_name=name,
column=col,
)
copy_data[name] = col.find_and_replace(
col_to_replace, col_replacement, col_all_nan
)
except KeyError:
# Do not change copy_data[name]
pass
result = self._from_table(Frame(copy_data, self.index))
return result
def _copy_categories(self, other, include_index=True):
"""
Utility that copies category information from `other`
to `self`.
"""
for name, col, other_col in zip(
self._column_names, self._columns, other._columns
):
if is_categorical_dtype(other_col) and not is_categorical_dtype(
col
):
self._data[name] = build_categorical_column(
categories=other_col.categories,
codes=as_column(col.base_data, dtype=col.dtype),
mask=col.base_mask,
ordered=other_col.ordered,
size=col.size,
offset=col.offset,
)
if include_index:
from cudf.core.index import RangeIndex
# include_index will still behave as False
# in case self._index is a RangeIndex
if (self._index is not None) and (
not isinstance(self._index, RangeIndex)
):
self._index._copy_categories(other._index)
return self
def _unaryop(self, op):
data_columns = (col.unary_operator(op) for col in self._columns)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
def isnull(self):
"""Identify missing values.
"""
data_columns = (col.isnull() for col in self._columns)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
def isna(self):
"""Identify missing values. Alias for `isnull`
"""
return self.isnull()
def notnull(self):
"""Identify non-missing values.
"""
data_columns = (col.notnull() for col in self._columns)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
def notna(self):
"""Identify non-missing values. Alias for `notnull`.
"""
return self.notnull()
def interleave_columns(self):
"""
Interleave Series columns of a table into a single column.
Converts the column major table `cols` into a row major column.
Parameters
----------
cols : input Table containing columns to interleave.
Example
-------
>>> df = DataFrame([['A1', 'A2', 'A3'], ['B1', 'B2', 'B3']])
>>> df
0 [A1, A2, A3]
1 [B1, B2, B3]
>>> df.interleave_columns()
0 A1
1 B1
2 A2
3 B2
4 A3
5 B3
Returns
-------
The interleaved columns as a single column
"""
if ("category" == self.dtypes).any():
raise ValueError(
"interleave_columns does not support 'category' dtype."
)
result = self._constructor_sliced(
libcudf.reshape.interleave_columns(self)
)
return result
def tile(self, count):
"""
Repeats the rows from `self` DataFrame `count` times to form a
new DataFrame.
Parameters
----------
self : input Table whose rows will be tiled.
count : Number of times to tile "rows". Must be non-negative.
Example
-------
>>> df = DataFrame([[8, 4, 7], [5, 2, 3]])
>>> count = 2
>>> df.tile(count)
0 1 2
0 8 4 7
1 5 2 3
0 8 4 7
1 5 2 3
Returns
-------
The table containing the tiled "rows".
"""
result = self.__class__._from_table(libcudf.reshape.tile(self, count))
result._copy_categories(self)
return result
def searchsorted(
self, values, side="left", ascending=True, na_position="last"
):
"""Find indices where elements should be inserted to maintain order
Parameters
----------
values : Frame (shape must be consistent with self)
Values to be hypothetically inserted into self
side : str {'left', 'right'}, optional, default 'left'
If 'left', the index of the first suitable location found is given
If 'right', return the last such index
ascending : bool, optional, default True
Sorted Frame is in ascending order (otherwise descending)
na_position : str {'last', 'first'}, optional, default 'last'
Position of null values in sorted order
Returns
-------
1-D cupy array of insertion points
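Example
-------
Illustrative sketch only:
>>> import cudf
>>> s = cudf.Series([1, 2, 3])
>>> s.searchsorted(2, side="left")
# returns 1, the insertion point that keeps the series sorted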
"""
# Call libcudf++ search_sorted primitive
from cudf.utils.dtypes import is_scalar
scalar_flag = None
if is_scalar(values):
scalar_flag = True
if not isinstance(values, Frame):
values = as_column(values)
if values.dtype != self.dtype:
self = self.astype(values.dtype)
values = values.as_frame()
outcol = libcudf.search.search_sorted(
self, values, side, ascending=ascending, na_position=na_position
)
# Return the result as a cupy array if values is non-scalar.
# If values is scalar, the result is expected to be scalar.
result = cupy.asarray(outcol.data_array_view)
if scalar_flag:
return result[0].item()
else:
return result
def _get_sorted_inds(self, ascending=True, na_position="last"):
"""
Sort by the values.
Parameters
----------
ascending : bool or list of bool, default True
If True, sort values in ascending order, otherwise descending.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs
at the end.
Returns
-------
out_column_inds : cuDF Column of indices sorted based on input
Difference from pandas:
* Support axis='index' only.
* Not supporting: inplace, kind
* Ascending can be a list of bools to control per column
"""
# This needs to be updated to handle list of bools for ascending
if ascending is True:
if na_position == "last":
na_position = 0
elif na_position == "first":
na_position = 1
elif ascending is False:
if na_position == "last":
na_position = 1
elif na_position == "first":
na_position = 0
else:
warnings.warn(
"When using a sequence of booleans for `ascending`, "
"`na_position` flag is not yet supported and defaults to "
"treating nulls as greater than all numbers"
)
na_position = 0
# If given a scalar need to construct a sequence of length # of columns
if np.isscalar(ascending):
ascending = [ascending] * self._num_columns
return libcudf.sort.order_by(self, ascending, na_position)
def sin(self):
return self._unaryop("sin")
def cos(self):
return self._unaryop("cos")
def tan(self):
return self._unaryop("tan")
def asin(self):
return self._unaryop("asin")
def acos(self):
return self._unaryop("acos")
def atan(self):
return self._unaryop("atan")
def exp(self):
return self._unaryop("exp")
def log(self):
return self._unaryop("log")
def sqrt(self):
return self._unaryop("sqrt")
@staticmethod
def _validate_merge_cfg(
lhs,
rhs,
left_on,
right_on,
on,
how,
left_index=False,
right_index=False,
lsuffix=None,
rsuffix=None,
):
"""
Raise errors for invalid combinations of merge input parameters.
"""
len_left_on = len(left_on) if left_on is not None else 0
len_right_on = len(right_on) if right_on is not None else 0
# must actually support the requested merge type
if how not in ["left", "inner", "outer", "leftanti", "leftsemi"]:
raise NotImplementedError(
"{!r} merge not supported yet".format(how)
)
# Passing 'on' with 'left_on' or 'right_on' is potentially ambiguous
if on:
if left_on or right_on:
raise ValueError(
'Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.'
)
# Require same total number of columns to join on in both operands
if not (len_left_on + left_index * len(lhs.index.names)) == (
len_right_on + right_index * len(rhs.index.names)
):
raise ValueError(
"Merge operands must have same number of join key columns"
)
# If nothing specified, must have common cols to use implicitly
same_named_columns = set(lhs._data.keys()) & set(rhs._data.keys())
if not (left_index or right_index):
if not (left_on or right_on):
if len(same_named_columns) == 0:
raise ValueError("No common columns to perform merge on")
for name in same_named_columns:
if not (
name in left_on
and name in right_on
and (left_on.index(name) == right_on.index(name))
):
if not (lsuffix or rsuffix):
raise ValueError(
"there are overlapping columns but "
"lsuffix and rsuffix are not defined"
)
if on:
on_keys = [on] if not isinstance(on, list) else on
for key in on_keys:
if not (key in lhs._data.keys() and key in rhs._data.keys()):
raise KeyError("Key {} not in both operands".format(on))
else:
for key in left_on:
if key not in lhs._data.keys():
raise KeyError('Key "{}" not in left operand'.format(key))
for key in right_on:
if key not in rhs._data.keys():
raise KeyError('Key "{}" not in right operand'.format(key))
def _merge(
self,
right,
on,
left_on,
right_on,
left_index,
right_index,
lsuffix,
rsuffix,
how,
method,
sort=False,
):
lhs = self
rhs = right
if left_on is None:
left_on = []
if right_on is None:
right_on = []
# Making sure that the "on" arguments are list of column names
if on:
on = [on] if isinstance(on, str) else list(on)
if left_on:
left_on = [left_on] if isinstance(left_on, str) else list(left_on)
if right_on:
right_on = (
[right_on] if isinstance(right_on, str) else list(right_on)
)
self._validate_merge_cfg(
self,
right,
left_on,
right_on,
on,
how,
left_index=left_index,
right_index=right_index,
lsuffix=lsuffix,
rsuffix=rsuffix,
)
if on:
left_on = right_on = on
same_named_columns = set(lhs._data.keys()) & set(rhs._data.keys())
if not (left_on or right_on) and not (left_index and right_index):
left_on = right_on = list(same_named_columns)
no_suffix_cols = []
for name in same_named_columns:
if left_on is not None and right_on is not None:
if name in left_on and name in right_on:
if left_on.index(name) == right_on.index(name):
no_suffix_cols.append(name)
for name in same_named_columns:
if name not in no_suffix_cols:
lhs.rename({name: "%s%s" % (name, lsuffix)}, inplace=True)
rhs.rename({name: "%s%s" % (name, rsuffix)}, inplace=True)
if name in left_on:
left_on[left_on.index(name)] = "%s%s" % (name, lsuffix)
if name in right_on:
right_on[right_on.index(name)] = "%s%s" % (name, rsuffix)
categorical_dtypes = {}
for name, col in itertools.chain(lhs._data.items(), rhs._data.items()):
if is_categorical_dtype(col):
categorical_dtypes[name] = col.dtype
# Save the order of the original column names for preservation later
org_names = list(itertools.chain(lhs._data.keys(), rhs._data.keys()))
# If neither left_index nor right_index is specified, that data won't
# be carried through the join. We'll get a new RangeIndex afterwards
lhs_full_view = False
rhs_full_view = False
if left_index:
lhs_full_view = True
if right_index:
rhs_full_view = True
# potentially do an implicit typecast
(lhs, rhs, to_categorical) = self._typecast_before_merge(
lhs, rhs, left_on, right_on, left_index, right_index, how
)
gdf_result = libcudf.join.join(
lhs,
rhs,
left_on,
right_on,
how,
method,
left_index=lhs_full_view,
right_index=rhs_full_view,
)
gdf_data = list(gdf_result._data.items())
result = []
cat_codes = []
for org_name in org_names:
for i in range(len(gdf_data)):
if gdf_data[i][0] == org_name:
result.append(gdf_data.pop(i))
break
for cat_name in to_categorical:
for i in range(len(gdf_data)):
if gdf_data[i][0] == cat_name + "_codes":
cat_codes.append(gdf_data.pop(i))
assert len(gdf_data) == 0
cat_codes = dict(cat_codes)
# Build a new data frame based on the merged columns from GDF
to_frame_data = OrderedDict()
for name, col in result:
if is_string_dtype(col):
to_frame_data[name] = col
elif is_categorical_dtype(categorical_dtypes.get(name, col.dtype)):
dtype = categorical_dtypes.get(name, col.dtype)
to_frame_data[name] = column.build_categorical_column(
categories=dtype.categories,
codes=cat_codes.get(str(name) + "_codes", col),
mask=col.base_mask,
size=col.size,
offset=col.offset,
ordered=dtype.ordered,
)
else:
to_frame_data[name] = column.build_column(
col.base_data,
dtype=categorical_dtypes.get(name, col.dtype),
mask=col.base_mask,
offset=col.offset,
size=col.size,
)
gdf_result._data = to_frame_data
to_return = self.__class__._from_table(gdf_result)
# If sort=True, Pandas would sort on the key columns in the
# same order as given in 'on'. If the indices are used as
# keys, the index will be sorted. If one index is specified,
# the key column on the other side will be used to sort.
# If no index is specified, return a new RangeIndex
if sort:
to_sort = self.__class__()
if left_index and right_index:
by = list(to_return._index._data.columns)
if left_on and right_on:
by += list(to_return[left_on]._data.columns)
elif left_index:
by = list(to_return[right_on]._data.columns)
elif right_index:
by = list(to_return[left_on]._data.columns)
else:
# left_on == right_on, or different names but same columns
# in both cases we can sort by either
by = list(to_return[left_on]._data.columns)
for i, col in enumerate(by):
to_sort[i] = col
inds = to_sort.argsort()
to_return = to_return.take(
inds, keep_index=(left_index or right_index)
)
return to_return
else:
return to_return
def _typecast_before_merge(
self, lhs, rhs, left_on, right_on, left_index, right_index, how
):
def casting_rules(lhs, rhs, dtype_l, dtype_r, how):
cast_warn = "can't safely cast column {} from {} with type \
{} to {}, upcasting to {}"
ctgry_err = "can't implicitly cast column {0} to categories \
from {1} during {1} join"
rtn = None
if pd.api.types.is_dtype_equal(dtype_l, dtype_r):
# The test is referenced from https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html
import time
import hdbscan
import warnings
import sklearn.cluster
import scipy.cluster
import sklearn.datasets
import numpy as np
import pandas as pd
import seaborn as sns
from numpy.linalg import norm
from classix.aggregation_test import aggregate
from classix import CLASSIX
from quickshift.QuickshiftPP import *
from sklearn import metrics
import matplotlib.pyplot as plt
from threadpoolctl import threadpool_limits
np.random.seed(0)
def benchmark_algorithm_tdim(dataset_dimensions, cluster_function, function_args, function_kwds,
dataset_size=10000, dataset_n_clusters=10, max_time=45, sample_size=10, algorithm=None):
# Initialize the result with NaNs so that any unfilled entries
# will be considered NULL when we convert to a pandas dataframe at the end
result_time = np.nan * np.ones((len(dataset_dimensions), sample_size))
result_ar = np.nan * np.ones((len(dataset_dimensions), sample_size))
result_ami = np.nan * np.ones((len(dataset_dimensions), sample_size))
for index, dimension in enumerate(dataset_dimensions):
for s in range(sample_size):
# Use sklearns make_blobs to generate a random dataset with specified size
# dimension and number of clusters
# set a fixed cluster_std so the clustering relies less on tuning parameters.
data, labels = sklearn.datasets.make_blobs(n_samples=dataset_size,
n_features=dimension,
centers=dataset_n_clusters,
cluster_std=1)
# Start the clustering with a timer
start_time = time.time()
cluster_function.fit(data, *function_args, **function_kwds)
time_taken = time.time() - start_time
if algorithm == "Quickshift++":
preds = cluster_function.memberships
else:
preds = cluster_function.labels_
# print("labels num:", len(np.unique(preds)))
ar = metrics.adjusted_rand_score(labels, preds)
ami = metrics.adjusted_mutual_info_score(labels, preds)
# If we are taking more than max_time then abort -- we don't
# want to spend excessive time on slow algorithms
if time_taken > max_time: # Luckily, it won't happen in our experiment.
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
return pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
else:
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
# Return the result as a dataframe for easier handling with seaborn afterwards
return pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_dimensions.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
def benchmark_algorithm_tsize(dataset_sizes, cluster_function, function_args, function_kwds,
dataset_dimension=10, dataset_n_clusters=10, max_time=45, sample_size=10, algorithm=None):
# Initialize the result with NaNs so that any unfilled entries
# will be considered NULL when we convert to a pandas dataframe at the end
result_time = np.nan * np.ones((len(dataset_sizes), sample_size))
result_ar = np.nan * np.ones((len(dataset_sizes), sample_size))
result_ami = np.nan * np.ones((len(dataset_sizes), sample_size))
for index, size in enumerate(dataset_sizes):
for s in range(sample_size):
# Use sklearns make_blobs to generate a random dataset with specified size
# dimension and number of clusters
# set a fixed cluster_std so the clustering relies less on tuning parameters.
data, labels = sklearn.datasets.make_blobs(n_samples=size,
n_features=dataset_dimension,
centers=dataset_n_clusters,
cluster_std=1)
# Start the clustering with a timer
start_time = time.time()
cluster_function.fit(data, *function_args, **function_kwds)
time_taken = time.time() - start_time
if algorithm == "Quickshift++":
preds = cluster_function.memberships
else:
preds = cluster_function.labels_
# print("labels num:", len(np.unique(preds)))
ar = metrics.adjusted_rand_score(labels, preds)
ami = metrics.adjusted_mutual_info_score(labels, preds)
# If we are taking more than max_time then abort -- we don't
# want to spend excessive time on slow algorithms
if time_taken > max_time: # Luckily, it won't happen in our experiment.
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
else:
result_time[index, s] = time_taken
result_ar[index, s] = ar
result_ami[index, s] = ami
# Return the result as a dataframe for easier handling with seaborn afterwards
return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_time.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ar.flatten()]).T, columns=['x','y']), \
pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size), result_ami.flatten()]).T, columns=['x','y'])
def rn_gaussian_dim():
warnings.filterwarnings("ignore")
sns.set_context('poster')
sns.set_palette('Paired', 10)
sns.set_color_codes()
dataset_dimensions = np.hstack([np.arange(1, 11) * 10])
np.random.seed(0)
with threadpool_limits(limits=1, user_api='blas'):
k_means = sklearn.cluster.KMeans(n_clusters=10, init='k-means++')
k_means_time, k_means_ar, k_means_ami = benchmark_algorithm_tdim(dataset_dimensions, k_means, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=10, min_samples=1, n_jobs=1, algorithm='ball_tree')
dbscan_btree_time, dbscan_btree_ar, dbscan_btree_ami = benchmark_algorithm_tdim(dataset_dimensions, dbscan, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=10, min_samples=1, n_jobs=1, algorithm='kd_tree')
dbscan_kdtree_time, dbscan_kdtree_ar, dbscan_kdtree_ami = benchmark_algorithm_tdim(dataset_dimensions, dbscan, (), {})
hdbscan_ = hdbscan.HDBSCAN(algorithm='best', core_dist_n_jobs=1)
hdbscan_time, hdbscan_ar, hdbscan_ami = benchmark_algorithm_tdim(dataset_dimensions, hdbscan_, (), {})
classix = CLASSIX(sorting='pca', radius=0.3, minPts=5, group_merging='distance', verbose=0)
classix_time, classix_ar, classix_ami = benchmark_algorithm_tdim(dataset_dimensions, classix, (), {})
quicks = QuickshiftPP(k=20, beta=0.7)
quicks_time, quicks_ar, quicks_ami = benchmark_algorithm_tdim(dataset_dimensions, quicks, (), {}, algorithm='Quickshift++')
k_means_time.to_csv("results/exp1/gd_kmeans_time.csv",index=False)
dbscan_kdtree_time.to_csv("results/exp1/gd_dbscan_kdtree_time.csv",index=False)
dbscan_btree_time.to_csv("results/exp1/gd_dbscan_btree_time.csv",index=False)
hdbscan_time.to_csv("results/exp1/gd_hdbscan_time.csv",index=False)
classix_time.to_csv("results/exp1/gd_classix_time.csv",index=False)
quicks_time.to_csv("results/exp1/gd_quicks_time.csv",index=False)
k_means_ar.to_csv("results/exp1/gd_kmeans_ar.csv",index=False)
dbscan_kdtree_ar.to_csv("results/exp1/gd_dbscan_kdtree_ar.csv",index=False)
dbscan_btree_ar.to_csv("results/exp1/gd_dbscan_btree_ar.csv",index=False)
hdbscan_ar.to_csv("results/exp1/gd_hdbscan_ar.csv",index=False)
classix_ar.to_csv("results/exp1/gd_classix_ar.csv",index=False)
quicks_ar.to_csv("results/exp1/gd_quicks_ar.csv",index=False)
def rn_gaussian_size():
warnings.filterwarnings("ignore")
sns.set_context('poster')
sns.set_palette('Paired', 10)
sns.set_color_codes()
np.random.seed(0)
dataset_sizes = np.hstack([np.arange(1, 11) * 5000])
np.random.seed(0)
with threadpool_limits(limits=1, user_api='blas'):
k_means = sklearn.cluster.KMeans(n_clusters=10, init='k-means++')
k_means_time, k_means_ar, k_means_ami = benchmark_algorithm_tsize(dataset_sizes, k_means, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=3, min_samples=1, n_jobs=1, algorithm='ball_tree')
dbscan_btree_time, dbscan_btree_ar, dbscan_btree_ami = benchmark_algorithm_tsize(dataset_sizes, dbscan, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=3, min_samples=1, n_jobs=1, algorithm='kd_tree')
dbscan_kdtree_time, dbscan_kdtree_ar, dbscan_kdtree_ami = benchmark_algorithm_tsize(dataset_sizes, dbscan, (), {})
hdbscan_ = hdbscan.HDBSCAN(algorithm='best', core_dist_n_jobs=1)
hdbscan_time, hdbscan_ar, hdbscan_ami = benchmark_algorithm_tsize(dataset_sizes, hdbscan_, (), {})
classix = CLASSIX(sorting='pca', radius=0.3, minPts=5, group_merging='distance', verbose=0)
classix_time, classix_ar, classix_ami = benchmark_algorithm_tsize(dataset_sizes, classix, (), {})
quicks = QuickshiftPP(k=20, beta=0.7)
quicks_time, quicks_ar, quicks_ami = benchmark_algorithm_tsize(dataset_sizes, quicks, (), {}, algorithm='Quickshift++')
k_means_time.to_csv("results/exp1/gs_kmeans_time.csv",index=False)
dbscan_kdtree_time.to_csv("results/exp1/gs_dbscan_kdtree_time.csv",index=False)
dbscan_btree_time.to_csv("results/exp1/gs_dbscan_btree_time.csv",index=False)
hdbscan_time.to_csv("results/exp1/gs_hdbscan_time.csv",index=False)
classix_time.to_csv("results/exp1/gs_classix_time.csv",index=False)
quicks_time.to_csv("results/exp1/gs_quicks_time.csv",index=False)
k_means_ar.to_csv("results/exp1/gs_kmeans_ar.csv",index=False)
dbscan_kdtree_ar.to_csv("results/exp1/gs_dbscan_kdtree_ar.csv",index=False)
dbscan_btree_ar.to_csv("results/exp1/gs_dbscan_btree_ar.csv",index=False)
hdbscan_ar.to_csv("results/exp1/gs_hdbscan_ar.csv",index=False)
classix_ar.to_csv("results/exp1/gs_classix_ar.csv",index=False)
quicks_ar.to_csv("results/exp1/gs_quicks_ar.csv",index=False)
def run_gassian_plot():
# -------------------------------dim
k_means_time = pd.read_csv("results/exp1/gd_kmeans_time.csv")
dbscan_kdtree_time = pd.read_csv("results/exp1/gd_dbscan_kdtree_time.csv")
dbscan_btree_time = pd.read_csv("results/exp1/gd_dbscan_btree_time.csv")
hdbscan_time = pd.read_csv("results/exp1/gd_hdbscan_time.csv")
classix_time = pd.read_csv("results/exp1/gd_classix_time.csv")
quicks_time = pd.read_csv("results/exp1/gd_quicks_time.csv")
k_means_ar = pd.read_csv("results/exp1/gd_kmeans_ar.csv")
dbscan_kdtree_ar = pd.read_csv("results/exp1/gd_dbscan_kdtree_ar.csv")
dbscan_btree_ar = pd.read_csv("results/exp1/gd_dbscan_btree_ar.csv")
hdbscan_ar = pd.read_csv("results/exp1/gd_hdbscan_ar.csv")
classix_ar = pd.read_csv("results/exp1/gd_classix_ar.csv")
quicks_ar = pd.read_csv("results/exp1/gd_quicks_ar.csv")
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
import re
from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from mock import patch
import woodwork as ww
from woodwork.accessor_utils import (
_is_dask_dataframe,
_is_dask_series,
_is_koalas_dataframe,
_is_koalas_series,
init_series,
)
from woodwork.exceptions import (
ColumnNotPresentError,
IndexTagRemovedWarning,
ParametersIgnoredWarning,
TypeConversionError,
TypingInfoMismatchWarning,
WoodworkNotInitError,
)
from woodwork.logical_types import (
URL,
Address,
Age,
AgeFractional,
AgeNullable,
Boolean,
BooleanNullable,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IntegerNullable,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
Unknown,
)
from woodwork.table_accessor import (
WoodworkTableAccessor,
_check_index,
_check_logical_types,
_check_partial_schema,
_check_time_index,
_check_unique_column_names,
_check_use_standard_tags,
_infer_missing_logical_types,
)
from woodwork.table_schema import TableSchema
from woodwork.tests.testing_utils import (
is_property,
is_public_method,
to_pandas,
validate_subset_schema,
)
from woodwork.tests.testing_utils.table_utils import assert_schema_equal
from woodwork.utils import import_or_none
dd = import_or_none("dask.dataframe")
ks = import_or_none("databricks.koalas")
def test_check_index_errors(sample_df):
error_message = "Specified index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_index(dataframe=sample_df, index="foo")
if isinstance(sample_df, pd.DataFrame):
# Does not check for index uniqueness with Dask
error_message = "Index column must be unique"
with pytest.raises(LookupError, match=error_message):
_check_index(sample_df, index="age")
def test_check_logical_types_errors(sample_df):
error_message = "logical_types must be a dictionary"
with pytest.raises(TypeError, match=error_message):
_check_logical_types(sample_df, logical_types="type")
bad_logical_types_keys = {
"full_name": None,
"age": None,
"birthday": None,
"occupation": None,
}
error_message = re.escape(
"logical_types contains columns that are not present in dataframe: ['birthday', 'occupation']"
)
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_logical_types(sample_df, bad_logical_types_keys)
def test_check_time_index_errors(sample_df):
error_message = "Specified time index column `foo` not found in dataframe"
with pytest.raises(ColumnNotPresentError, match=error_message):
_check_time_index(dataframe=sample_df, time_index="foo")
def test_check_unique_column_names_errors(sample_df):
if _is_koalas_dataframe(sample_df):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if _is_dask_dataframe(sample_df):
duplicate_cols_df = dd.concat(
[duplicate_cols_df, duplicate_cols_df["age"]], axis=1
)
else:
duplicate_cols_df.insert(0, "age", [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(
IndexError, match="Dataframe cannot contain duplicate columns names"
):
_check_unique_column_names(duplicate_cols_df)
def test_check_use_standard_tags_errors():
error_message = "use_standard_tags must be a dictionary or a boolean"
with pytest.raises(TypeError, match=error_message):
_check_use_standard_tags(1)
def test_accessor_init(sample_df):
assert sample_df.ww.schema is None
sample_df.ww.init()
assert isinstance(sample_df.ww.schema, TableSchema)
def test_accessor_schema_property(sample_df):
sample_df.ww.init()
assert sample_df.ww._schema is not sample_df.ww.schema
assert sample_df.ww._schema == sample_df.ww.schema
def test_set_accessor_name(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.name = "name"
df.ww.init()
assert df.ww.name is None
df.ww.name = "name"
assert df.ww.schema.name == "name"
assert df.ww.name == "name"
def test_rename_init_with_name(sample_df):
df = sample_df.copy()
df.ww.init(name="name")
assert df.ww.name == "name"
df.ww.name = "new_name"
assert df.ww.schema.name == "new_name"
assert df.ww.name == "new_name"
def test_name_error_on_init(sample_df):
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(name=123)
def test_name_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table name must be a string"
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.name = 123
def test_name_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.name = "name"
assert df.ww.name == "name"
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.name == "name"
assert dropped_df.ww.schema.name == "name"
def test_set_accessor_metadata(sample_df):
df = sample_df.copy()
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata
with pytest.raises(WoodworkNotInitError, match=error):
df.ww.metadata = {"new": "metadata"}
df.ww.init()
assert df.ww.metadata == {}
df.ww.metadata = {"new": "metadata"}
assert df.ww.schema.metadata == {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
def test_set_metadata_after_init_with_metadata(sample_df):
df = sample_df.copy()
df.ww.init(table_metadata={"new": "metadata"})
assert df.ww.metadata == {"new": "metadata"}
df.ww.metadata = {"new": "new_metadata"}
assert df.ww.schema.metadata == {"new": "new_metadata"}
assert df.ww.metadata == {"new": "new_metadata"}
def test_metadata_persists_after_drop(sample_df):
df = sample_df.copy()
df.ww.init()
df.ww.metadata = {"new": "metadata"}
assert df.ww.metadata == {"new": "metadata"}
dropped_df = df.ww.drop(["id"])
assert dropped_df.ww.metadata == {"new": "metadata"}
assert dropped_df.ww.schema.metadata == {"new": "metadata"}
def test_metadata_error_on_init(sample_df):
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.init(table_metadata=123)
def test_metadata_error_on_update(sample_df):
sample_df.ww.init()
err_msg = "Table metadata must be a dictionary."
with pytest.raises(TypeError, match=err_msg):
sample_df.ww.metadata = 123
def test_accessor_physical_types_property(sample_df):
sample_df.ww.init(logical_types={"age": "Categorical"})
assert isinstance(sample_df.ww.physical_types, dict)
assert set(sample_df.ww.physical_types.keys()) == set(sample_df.columns)
for k, v in sample_df.ww.physical_types.items():
logical_type = sample_df.ww.columns[k].logical_type
if _is_koalas_dataframe(sample_df) and logical_type.backup_dtype is not None:
assert v == logical_type.backup_dtype
else:
assert v == logical_type.primary_dtype
def test_accessor_separation_of_params(sample_df):
# mix up order of accessor and schema params
schema_df = sample_df.copy()
schema_df.ww.init(
name="test_name",
index="id",
semantic_tags={"id": "test_tag"},
time_index="signup_date",
)
assert schema_df.ww.semantic_tags["id"] == {"index", "test_tag"}
assert schema_df.ww.index == "id"
assert schema_df.ww.time_index == "signup_date"
assert schema_df.ww.name == "test_name"
def test_init_with_full_schema(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww._schema
head_df = schema_df.head(2)
assert head_df.ww.schema is None
head_df.ww.init_with_full_schema(schema=schema)
assert head_df.ww._schema is schema
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
iloc_df = schema_df.loc[[2, 3]]
assert iloc_df.ww.schema is None
iloc_df.ww.init_with_full_schema(schema=schema)
assert iloc_df.ww._schema is schema
assert iloc_df.ww.name == "test_schema"
assert iloc_df.ww.semantic_tags["id"] == {"index", "test_tag"}
# Extra parameters do not take effect
assert isinstance(iloc_df.ww.logical_types["id"], Integer)
def test_accessor_init_errors_methods(sample_df):
methods_to_exclude = ["init", "init_with_full_schema", "init_with_partial_schema"]
public_methods = [
method
for method in dir(sample_df.ww)
if is_public_method(WoodworkTableAccessor, method)
]
public_methods = [
method for method in public_methods if method not in methods_to_exclude
]
method_args_dict = {
"add_semantic_tags": [{"id": "new_tag"}],
"describe": None,
"pop": ["id"],
"describe": None,
"describe_dict": None,
"drop": ["id"],
"get_valid_mi_columns": None,
"mutual_information": None,
"mutual_information_dict": None,
"remove_semantic_tags": [{"id": "new_tag"}],
"rename": [{"id": "new_id"}],
"reset_semantic_tags": None,
"select": [["Double"]],
"set_index": ["id"],
"set_time_index": ["signup_date"],
"set_types": [{"id": "Integer"}],
"to_disk": ["dir"],
"to_dictionary": None,
"value_counts": None,
"infer_temporal_frequencies": None,
}
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for method in public_methods:
func = getattr(sample_df.ww, method)
method_args = method_args_dict[method]
with pytest.raises(WoodworkNotInitError, match=error):
if method_args:
func(*method_args)
else:
func()
def test_accessor_init_errors_properties(sample_df):
props_to_exclude = ["iloc", "loc", "schema", "_dataframe"]
props = [
prop
for prop in dir(sample_df.ww)
if is_property(WoodworkTableAccessor, prop) and prop not in props_to_exclude
]
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
for prop in props:
with pytest.raises(WoodworkNotInitError, match=error):
getattr(sample_df.ww, prop)
def test_init_accessor_with_schema_errors(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init()
schema = schema_df.ww.schema
iloc_df = schema_df.iloc[:, :-1]
assert iloc_df.ww.schema is None
error = "Provided schema must be a Woodwork.TableSchema object."
with pytest.raises(TypeError, match=error):
iloc_df.ww.init_with_full_schema(schema=int)
error = (
"Woodwork typing information is not valid for this DataFrame: "
"The following columns in the typing information were missing from the DataFrame: {'ip_address'}"
)
with pytest.raises(ValueError, match=error):
iloc_df.ww.init_with_full_schema(schema=schema)
def test_accessor_with_schema_parameter_warning(sample_df):
schema_df = sample_df.copy()
schema_df.ww.init(name="test_schema", semantic_tags={"id": "test_tag"}, index="id")
schema = schema_df.ww.schema
head_df = schema_df.head(2)
warning = (
"A schema was provided and the following parameters were ignored: index, "
"time_index, logical_types, already_sorted, semantic_tags, use_standard_tags"
)
with pytest.warns(ParametersIgnoredWarning, match=warning):
head_df.ww.init_with_full_schema(
index="ignored_id",
time_index="ignored_time_index",
logical_types={"ignored": "ltypes"},
already_sorted=True,
semantic_tags={"ignored_id": "ignored_test_tag"},
use_standard_tags={"id": True, "age": False},
schema=schema,
)
assert head_df.ww.name == "test_schema"
assert head_df.ww.semantic_tags["id"] == {"index", "test_tag"}
def test_accessor_getattr(sample_df):
schema_df = sample_df.copy()
# We can access attributes on the Accessor class before the schema is initialized
assert schema_df.ww.schema is None
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
schema_df.ww.index
schema_df.ww.init()
assert schema_df.ww.name is None
assert schema_df.ww.index is None
assert schema_df.ww.time_index is None
assert set(schema_df.ww.columns.keys()) == set(sample_df.columns)
error = re.escape("Woodwork has no attribute 'not_present'")
with pytest.raises(AttributeError, match=error):
sample_df.ww.init()
sample_df.ww.not_present
def test_getitem(sample_df):
df = sample_df
df.ww.init(
time_index="signup_date",
index="id",
name="df_name",
logical_types={"age": "Double"},
semantic_tags={"age": {"custom_tag"}},
)
assert list(df.columns) == list(df.ww.schema.columns)
subset = ["id", "signup_date"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index == "id"
assert df_subset.ww.time_index == "signup_date"
subset = ["age", "email"]
df_subset = df.ww[subset]
pd.testing.assert_frame_equal(to_pandas(df[subset]), to_pandas(df_subset))
assert subset == list(df_subset.ww._schema.columns)
assert df_subset.ww.index is None
assert df_subset.ww.time_index is None
assert isinstance(df_subset.ww.logical_types["age"], Double)
assert df_subset.ww.semantic_tags["age"] == {"custom_tag", "numeric"}
subset = df.ww[[]]
assert len(subset.ww.columns) == 0
assert subset.ww.index is None
assert subset.ww.time_index is None
series = df.ww["age"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["age"]))
assert isinstance(series.ww.logical_type, Double)
assert series.ww.semantic_tags == {"custom_tag", "numeric"}
series = df.ww["id"]
pd.testing.assert_series_equal(to_pandas(series), to_pandas(df["id"]))
assert isinstance(series.ww.logical_type, Integer)
assert series.ww.semantic_tags == {"index"}
def test_getitem_init_error(sample_df):
error = re.escape(
"Woodwork not initialized for this DataFrame. Initialize by calling DataFrame.ww.init"
)
with pytest.raises(WoodworkNotInitError, match=error):
sample_df.ww["age"]
def test_getitem_invalid_input(sample_df):
df = sample_df
df.ww.init()
error_msg = r"Column\(s\) '\[1, 2\]' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww[["email", 2, 1]]
error_msg = "Column with name 'invalid_column' not found in DataFrame"
with pytest.raises(ColumnNotPresentError, match=error_msg):
df.ww["invalid_column"]
def test_accessor_equality(sample_df):
# Confirm equality with same schema and same data
schema_df = sample_df.copy()
schema_df.ww.init()
copy_df = schema_df.ww.copy()
assert schema_df.ww == copy_df.ww
# Confirm not equal with different schema but same data
copy_df.ww.set_time_index("signup_date")
assert schema_df.ww != copy_df.ww
# Confirm not equal with same schema but different data - only pandas
loc_df = schema_df.ww.loc[:2, :]
if isinstance(sample_df, pd.DataFrame):
assert schema_df.ww != loc_df
else:
assert schema_df.ww == loc_df
def test_accessor_shallow_equality(sample_df):
metadata_table = sample_df.copy()
metadata_table.ww.init(table_metadata={"user": "user0"})
diff_metadata_table = sample_df.copy()
diff_metadata_table.ww.init(table_metadata={"user": "user2"})
assert diff_metadata_table.ww.__eq__(metadata_table, deep=False)
assert not diff_metadata_table.ww.__eq__(metadata_table, deep=True)
schema = metadata_table.ww.schema
diff_data_table = metadata_table.ww.loc[:2, :]
same_data_table = metadata_table.ww.copy()
assert diff_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.schema.__eq__(schema, deep=True)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=False)
assert same_data_table.ww.__eq__(metadata_table.ww, deep=True)
assert diff_data_table.ww.__eq__(metadata_table.ww, deep=False)
if isinstance(sample_df, pd.DataFrame):
assert not diff_data_table.ww.__eq__(metadata_table.ww, deep=True)
def test_accessor_init_with_valid_string_time_index(time_index_df):
time_index_df.ww.init(name="schema", index="id", time_index="times")
assert time_index_df.ww.name == "schema"
assert time_index_df.ww.index == "id"
assert time_index_df.ww.time_index == "times"
assert isinstance(
time_index_df.ww.columns[time_index_df.ww.time_index].logical_type, Datetime
)
def test_accessor_init_with_numeric_datetime_time_index(time_index_df):
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": Datetime})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(
name="schema", time_index="strs", logical_types={"strs": Datetime}
)
assert schema_df.ww.time_index == "ints"
assert schema_df["ints"].dtype == "datetime64[ns]"
def test_accessor_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Integer)
assert date_col.semantic_tags == {"time_index", "numeric"}
# Specify logical type for time index on init
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="ints", logical_types={"ints": "Double"})
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
schema_df = time_index_df.copy()
schema_df.ww.init(time_index="strs", logical_types={"strs": "Double"})
date_col = schema_df.ww.columns["strs"]
assert schema_df.ww.time_index == "strs"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"time_index", "numeric"}
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="ints", logical_types={"ints": "Categorical"})
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
time_index_df.ww.init(time_index="letters", logical_types={"strs": "Integer"})
# Set numeric time index after init
schema_df = time_index_df.copy()
schema_df.ww.init(logical_types={"ints": "Double"})
assert schema_df.ww.time_index is None
schema_df.ww.set_time_index("ints")
date_col = schema_df.ww.columns["ints"]
assert schema_df.ww.time_index == "ints"
assert isinstance(date_col.logical_type, Double)
assert date_col.semantic_tags == {"numeric", "time_index"}
def test_numeric_time_index_dtypes(numeric_time_index_df):
numeric_time_index_df.ww.init(time_index="ints")
assert numeric_time_index_df.ww.time_index == "ints"
assert isinstance(numeric_time_index_df.ww.logical_types["ints"], Integer)
assert numeric_time_index_df.ww.semantic_tags["ints"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("floats")
assert numeric_time_index_df.ww.time_index == "floats"
assert isinstance(numeric_time_index_df.ww.logical_types["floats"], Double)
assert numeric_time_index_df.ww.semantic_tags["floats"] == {"time_index", "numeric"}
numeric_time_index_df.ww.set_time_index("with_null")
assert numeric_time_index_df.ww.time_index == "with_null"
assert isinstance(
numeric_time_index_df.ww.logical_types["with_null"], IntegerNullable
)
assert numeric_time_index_df.ww.semantic_tags["with_null"] == {
"time_index",
"numeric",
}
def test_accessor_init_with_invalid_string_time_index(sample_df):
error_msg = "Time index column must contain datetime or numeric values"
with pytest.raises(TypeError, match=error_msg):
sample_df.ww.init(name="schema", time_index="full_name")
def test_accessor_init_with_string_logical_types(sample_df):
logical_types = {"full_name": "natural_language", "age": "Double"}
schema_df = sample_df.copy()
schema_df.ww.init(name="schema", logical_types=logical_types)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, Double)
logical_types = {
"full_name": "NaturalLanguage",
"age": "IntegerNullable",
"signup_date": "Datetime",
}
schema_df = sample_df.copy()
schema_df.ww.init(
name="schema", logical_types=logical_types, time_index="signup_date"
)
assert isinstance(schema_df.ww.columns["full_name"].logical_type, NaturalLanguage)
assert isinstance(schema_df.ww.columns["age"].logical_type, IntegerNullable)
assert schema_df.ww.time_index == "signup_date"
def test_int_dtype_inference_on_init():
df = pd.DataFrame(
{
"ints_no_nans": pd.Series([1, 2]),
"ints_nan": pd.Series([1, np.nan]),
"ints_NA": pd.Series([1, pd.NA]),
"ints_NA_specified": pd.Series([1, pd.NA], dtype="Int64"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["ints_no_nans"].dtype == "int64"
assert df["ints_nan"].dtype == "float64"
assert df["ints_NA"].dtype == "category"
assert df["ints_NA_specified"].dtype == "Int64"
def test_bool_dtype_inference_on_init():
df = pd.DataFrame(
{
"bools_no_nans": pd.Series([True, False]),
"bool_nan": pd.Series([True, np.nan]),
"bool_NA": pd.Series([True, pd.NA]),
"bool_NA_specified": pd.Series([True, pd.NA], dtype="boolean"),
}
)
df = df.loc[df.index.repeat(5)].reset_index(drop=True)
df.ww.init()
assert df["bools_no_nans"].dtype == "bool"
assert df["bool_nan"].dtype == "category"
assert df["bool_NA"].dtype == "category"
assert df["bool_NA_specified"].dtype == "boolean"
def test_str_dtype_inference_on_init():
df = pd.DataFrame(
{
"str_no_nans": pd.Series(["a", "b"]),
"str_nan": pd.Series(["a", np.nan]),
"str_NA": | pd.Series(["a", pd.NA]) | pandas.Series |
import tnkeeh
import pandas as pd
text = tnkeeh._remove_special_chars('كيف حالكم ، يا أشقاء')
expected = 'كيف حالكم يا أشقاء'
if text == expected:
print('success')
else:
print('failed')
text = tnkeeh._remove_special_chars('3 + 3 and 3 - 3', excluded_chars = ['+' , '-', '*'])
expected = '3 + 3 and 3 - 3'
if text == expected:
print('success')
else:
print('failed')
text = tnkeeh._remove_special_chars('9/8/1770', excluded_chars = ['/'])
expected = '9/8/1770'
if text == expected:
print('success')
else:
print('failed')
text = tnkeeh._remove_twitter_meta("@arthurlacoste check this link : https://lit.ly/hugeLink ! so #nsfw")
expected = " check this link : ! so "
if text == expected:
print('success')
else:
print('failed')
text = tnkeeh._remove_diacritics("وَأفْجَـعُ مَن فَقَدْنَا مَن وَّجَدْنَا قُـبَيْلَ الفَقْدِ مَفْقُـودَ الْمِثالِ يُدفِّـنُ بَعْضُنَا")
expected = "وأفجـع من فقدنا من وجدنا قـبيل الفقد مفقـود المثال يدفـن بعضنا"
if text == expected:
print('success')
else:
print('failed')
text = tnkeeh._remove_html_elements('<doc id="7" url="https://ar.wikipedia.org/wiki?curid=7" title="ماء">')
expected = ""
if text == expected:
print('success')
else:
print('failed')
text = tnkeeh._remove_repeated_chars('هههههههه')
expected = "هه"
if text == expected:
print('success')
else:
print('failed')
text = tnkeeh._remove_extra_spaces('اهلا كيف حالك')
expected = "اهلا كيف حالك"
if text == expected:
print('success')
else:
print('failed')
df = pd.DataFrame({'col1':['السلام عليكم', 'كيًف حالكم']})
import os
import pandas as pd
import numpy as np
import copy
from config import opt, dataset_files, logger
from pprint import pprint
from sklearn import metrics
def ratio(label_path):
df = pd.read_csv(label_path, sep='\t', header=None, encoding='utf-8', engine='python')
df.columns=['id','id_sub', 'label']
logger.info("预测标签比例为:")
logger.info(df['label'].value_counts())
def search_f1(y_true, y_pred):
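# Sweep candidate thresholds from opt.start/100 to opt.end/100 (assumed to be
# integer percentage bounds taken from the config) and keep the threshold
# that maximizes the binary F1 score.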
best = 0
best_t = 0
for i in range(opt.start, opt.end):
tres = i / 100
y_pred_bin = (y_pred >= tres)
score = metrics.f1_score(y_true, y_pred_bin, average='binary')
if score > best:
best = score
best_t = tres
return best, best_t
def kfold_search_f1(kfold_path):
files = os.listdir(kfold_path)
files = [i for i in files]
i = 0
df_merged = None
for fname in files:
tmp_df = pd.read_csv(kfold_path + fname, sep='\t')
if i == 0:
df_merged = pd.read_csv(kfold_path + fname, sep='\t')
elif i > 0:
df_merged = df_merged.append(tmp_df, sort=False)
# print(df_merged.head(10))
y_true = df_merged['label']
y_pred = df_merged['softlabel']
f1_score, threshold = search_f1(y_true, y_pred)
logger.info("f1_score:{:.4f}, threshold:{:.2f}".format(f1_score, threshold))
return f1_score, threshold
def work(kfold):
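# Majority vote over the binary (0/1) fold predictions for a single sample;
# on a tie, count.index(max(count)) returns the first maximum, so ties
# resolve to label 0.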
count = [0, 0]
for i in kfold:
count[i] += 1
out = count.index(max(count))
return out
def vote(kfold_path):
files = os.listdir(kfold_path)
files = [i for i in files]
i = 0
df_merged = None
for fname in files:
tmp_df = pd.read_csv(kfold_path + fname, sep='\t', header=None)
tmp_df.columns = ['id','id_sub','label']
# tmp_df_left = copy.deepcopy(tmp_df[['id', 'label']])
# pprint(tmp_df.head(10))
if i == 0:
df_merged = pd.read_csv(kfold_path + fname, sep='\t', header=None)
df_merged.columns = ['id','id_sub','label']
if i > 0:
df_merged = df_merged.merge(tmp_df, how='left', on=['id', 'id_sub'])
i += 1
tmp_label = np.array(df_merged.iloc[:, 2:])
voted_label = [work(line) for line in tmp_label]
df_summit = copy.deepcopy(df_merged[['id', 'id_sub']])
df_summit['label'] = voted_label
df_summit.to_csv(kfold_path + 'vote.tsv', index=False, header=False, sep='\t')
print("Vote successful!")
def kfold_result_combined(kfold_path, pattern='vote', threshold=0.5):
files_name = os.listdir(kfold_path)
files_path = [os.path.join(kfold_path, fname) for fname in files_name]
df_merged = None
weight = []
for idx, fname in enumerate(files_path):
if files_name[idx] in ['weighted.tsv', 'vote.tsv', 'average.tsv']:
continue
tmp_df = pd.read_csv(fname, sep='\t', header=None)
tmp_df.columns = ['id','id_sub','label']
weight.append(float(files_name[idx].split('-')[2].split('_')[1]))
if df_merged is None:
df_merged = copy.deepcopy(tmp_df)
df_merged.columns = ['id','id_sub','label']
else:
df_merged = df_merged.merge(tmp_df, how='left', on=['id', 'id_sub'])
tmp_label = df_merged.iloc[:, 2:].to_numpy()
def average_result(all_result): # shape:[num_model, axis]
all_result = np.asarray(all_result, dtype=np.float)
return np.mean(all_result, axis=1)
def weighted_result(all_result, weight):
all_result = np.asarray(all_result, dtype=np.float)
return np.average(all_result, axis=1, weights=weight)
def vote_result(all_result):
all_result = np.asarray(all_result, dtype=np.int)
lens = (all_result.shape[1] + 1) // 2
all_result = np.sum(all_result, axis=1)
return [1 if ct>=lens else 0 for ct in all_result]
def threshold_split(result_data, threshold=0.5):
return list(np.array(result_data >= threshold, dtype='int'))
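# Dispatch on the requested combination strategy: 'vote' takes a majority of
# hard labels, 'weighted' averages soft labels using the per-fold weights
# parsed from the file names above, and 'average' uses an unweighted mean.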
combined_result = {
'vote': lambda : vote_result(tmp_label),
'weighted': lambda : threshold_split(weighted_result(tmp_label, weight=weight), threshold),
'average': lambda : threshold_split(average_result(tmp_label), threshold),
}[pattern]()
df_summit = copy.deepcopy(df_merged[['id', 'id_sub']])
df_summit['label'] = combined_result
df_summit.to_csv(kfold_path + pattern + '.tsv', index=False, header=False, sep='\t')
print("{} successful!".format(pattern))
def generate_pseudo_data(test_query, test_reply, kfold_path):
# test_dataset
df_test_query = pd.read_csv(test_query, sep='\t', header=None, encoding='utf-8', engine='python')
df_test_query.columns=['id','q1']
df_test_reply = pd.read_csv(test_reply, sep='\t', header=None, encoding='utf-8', engine='python')
"""
Tests for basic phygnn functionality and execution.
"""
# pylint: disable=W0613
import os
import pytest
import numpy as np
import pandas as pd
import tempfile
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import (InputLayer, Dense, Dropout, Activation,
BatchNormalization, Conv1D, Conv3D,
Flatten, LSTM)
from phygnn import PhysicsGuidedNeuralNetwork, p_fun_dummy
N = 100
A = np.linspace(-1, 1, N)
B = np.linspace(-1, 1, N)
A, B = np.meshgrid(A, B)
A = np.expand_dims(A.flatten(), axis=1)
B = np.expand_dims(B.flatten(), axis=1)
Y = np.sqrt(A ** 2 + B ** 2)
X = np.hstack((A, B))
P = X.copy()
Y_NOISE = Y * (1 + (np.random.random(Y.shape) - 0.5) * 0.5) + 0.1
HIDDEN_LAYERS = [{'units': 64, 'activation': 'relu', 'name': 'relu1'},
{'units': 64, 'activation': 'relu', 'name': 'relu2'},
]
def p_fun_pythag(model, y_true, y_predicted, p):
"""Example function for loss calculation using physical relationships.
Parameters
----------
model : PhysicsGuidedNeuralNetwork
Instance of the phygnn model at the current point in training.
y_true : np.ndarray
Known y values that were given to the PhyGNN fit method.
y_predicted : tf.Tensor
Predicted y values in a 2D tensor based on x values in this batch.
p : np.ndarray
Supplemental physical feature data that can be used to calculate a
y_physical value to compare against y_predicted. The rows in this
array have been carried through the batching process alongside y_true
and the features used to create y_predicted and so can be used 1-to-1
with the rows in y_predicted and y_true.
Returns
-------
p_loss : tf.Tensor
A 0D tensor physical loss value.
"""
p = tf.convert_to_tensor(p, dtype=tf.float32)
y_physical = tf.sqrt(p[:, 0]**2 + p[:, 1]**2)
y_physical = tf.expand_dims(y_physical, 1)
p_loss = tf.math.reduce_mean(tf.math.abs(y_predicted - y_physical))
return p_loss
def p_fun_bad(model, y_true, y_predicted, p):
"""This is an example of a poorly formulated p_fun() that uses
numpy operations."""
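# Calling .numpy() and using numpy operations detaches the computation from
# the TensorFlow graph, so no gradients can flow through this physics loss --
# which is what makes this formulation "bad".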
y_physical = p[:, 0]**2 + p[:, 1]**2
p_loss = np.mean(np.abs(y_predicted.numpy() - y_physical))
p_loss = tf.convert_to_tensor(p_loss, dtype=tf.float32)
return p_loss
def test_nn():
"""Test the basic NN operation of the PGNN without weighting pfun."""
PhysicsGuidedNeuralNetwork.seed(0)
model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
hidden_layers=HIDDEN_LAYERS,
loss_weights=(1.0, 0.0),
n_features=2, n_labels=1,
feature_names=['a', 'b'],
output_names=['c'])
model.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)
test_mae = np.mean(np.abs(model.predict(X) - Y))
assert len(model.layers) == 6
assert len(model.weights) == 6
assert len(model.history) == 20
assert model.history.validation_loss.values[-1] < 0.15
assert test_mae < 0.15
def test_phygnn():
"""Test the operation of the PGNN with weighting pfun."""
PhysicsGuidedNeuralNetwork.seed(0)
model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
hidden_layers=HIDDEN_LAYERS,
loss_weights=(0.0, 1.0),
n_features=2, n_labels=1)
model.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)
test_mae = np.mean(np.abs(model.predict(X) - Y))
assert len(model.layers) == 6
assert len(model.weights) == 6
assert len(model.history) == 20
assert isinstance(model.layers[0], InputLayer)
assert isinstance(model.layers[1], Dense)
assert isinstance(model.layers[2], Activation)
assert isinstance(model.layers[3], Dense)
assert isinstance(model.layers[4], Activation)
assert isinstance(model.layers[5], Dense)
assert model.history.validation_loss.values[-1] < 0.015
assert test_mae < 0.015
def test_df_input():
"""Test the operation of the PGNN with labeled input dataframes."""
PhysicsGuidedNeuralNetwork.seed(0)
model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
hidden_layers=HIDDEN_LAYERS,
loss_weights=(0.0, 1.0),
n_features=2, n_labels=1)
x_df = pd.DataFrame(X, columns=('a', 'b'))
y_df = pd.DataFrame(Y_NOISE, columns=('c',))
p_df = pd.DataFrame(P, columns=('a', 'b'))
model.fit(x_df, y_df, p_df, n_batch=1, n_epoch=2)
assert model.feature_names == ['a', 'b']
assert model.output_names == ['c']
x_df_bad = pd.DataFrame(X, columns=('x1', 'x2'))
y_df_bad = | pd.DataFrame(Y_NOISE, columns=('y',)) | pandas.DataFrame |
from django.http import JsonResponse
import pandas as pd
import numpy as np
import json
from django.views.decorators.csrf import csrf_protect
import os # os.getcwd()
df_comPreRequisitos = pd.read_csv('data_science/disciplinas_prerequisitosnome.csv')
df_turmas2015 = pd.read_csv('data_science/turmas_new.csv')
def dataFrameToJson(dataFrame):
dataFrame = dataFrame.to_json(orient='records')
dataFrame = json.loads(dataFrame)
return dataFrame
# Returns the courses and their respective prerequisites for the given term
@csrf_protect
def disciplinasPeriodo(request):
periodo = int(request.GET.get('periodo'))
df_retorno = df_comPreRequisitos[df_comPreRequisitos['periodo']==periodo]
if(periodo == 0):
df_retorno = df_comPreRequisitos['nome']
return JsonResponse({'results':dataFrameToJson(df_retorno)})
# Returns the standard deviation of the course grades
@csrf_protect
def desvioPadrao(request):
disciplina = request.GET.get('disciplina')
data = df_turmas2015[df_turmas2015['nome'] == disciplina].media_final
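# manual sample standard deviation (ddof=1); note the grades are truncated to int before the deviations are computed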
media = data.mean()
soma = 0
soma += ((data.map(int) - media) ** 2).sum()
variancia = soma / (len(data) - 1)
desvio = variancia ** 0.5
return JsonResponse({'Desvio padrão': desvio})
# Returns the average grade of the course
@csrf_protect
def media(request):
disciplina = request.GET.get('disciplina')
data = df_turmas2015[df_turmas2015['nome'] == disciplina].media_final
media = data.mean()
return JsonResponse({'Media': media})
# Returns the grades of the course
@csrf_protect
def notas(request):
disciplina = request.GET.get('disciplina')
colunas = ['discente', 'id_turma', 'media_final', 'nome']
df = df_turmas2015[colunas].drop_duplicates()
if(disciplina==""):
notas = df[['nome', 'media_final']]
else:
notas = df[df['nome'] == disciplina].media_final
return JsonResponse({'Notas': dataFrameToJson(notas)})
# Returns the grades of the course at or above a given threshold
@csrf_protect
def notasFiltro(request):
disciplina = request.GET.get('disciplina')
filtro = int(request.GET.get('filtro'))
notas = df_turmas2015[df_turmas2015['nome'] == disciplina].media_final
notas = notas[notas>= filtro]
return JsonResponse({'Notas': dataFrameToJson(notas)})
# Returns the correlation (float) between the two given courses
def simpleCorrelacao(discA,discB):
dataFrame = df_turmas2015
dataFrameA = dataFrame[dataFrame['nome'] == discA]
dataFrameB = dataFrame[dataFrame['nome'] == discB]
# Students approved in discA
dataFrameA = dataFrameA[dataFrameA['descricao'].str.contains('APROVADO')]
series_aprovados = dataFrameA.discente.unique()
df_finalB = dataFrameB[dataFrameB.discente.isin(series_aprovados)]
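# keep only each student's first attempt at discB (earliest periodoano)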
df_finalB = df_finalB.groupby('discente').periodoano.min().reset_index()
df_finalB = pd.merge(df_finalB, dataFrameB, on=["discente","periodoano"])
colunas = ['discente', 'media_final', 'nome']
dataFrameA = dataFrameA[colunas]
df_finalB = df_finalB[colunas]
conc = pd.concat([dataFrameA, df_finalB])
df = pd.crosstab(conc.discente, conc.nome, conc.media_final, aggfunc=np.mean)
df = df.dropna()
df_correlacao = df.corr()
# return JsonResponse({'results': df_correlacao[discA][discB] })
return df_correlacao[discA][discB]
# Computes the pairwise correlation matrix for a list of courses
@csrf_protect
def correlacao(request):
args = request.GET.get('lista')
lista_disciplinas = args.split(',')
# matrix of zeros
w, h = len(lista_disciplinas), len(lista_disciplinas)
content = [[0] * w for i in range(h)]
correlacoes = np.array(content, dtype='f')
# compute the correlations without repetition (upper triangle only)
for i in range(0, len(lista_disciplinas)):
for j in range(0, len(lista_disciplinas)):
if i == j:
correlacoes[i][j] = 1
if i < j:
correlacoes[i][j] = simpleCorrelacao(lista_disciplinas[i], lista_disciplinas[j])
df_retorno = pd.DataFrame(correlacoes, columns=lista_disciplinas)
# df_retorno = df_retorno.set_axis(lista_disciplinas, axis=0, inplace=False)
return JsonResponse({'results':dataFrameToJson(df_retorno)})
# Returns the data for the parallel coordinates chart
@csrf_protect
def coordenadasParalelas(request):
args = request.GET.get('lista')
lista_disciplinas = args.split(',')
dataFrame = df_turmas2015
# Counting failures where media_final is not null
df_contagemRep = dataFrame[dataFrame['descricao'].str.contains('REPROVADO')]
df_contagemRep = df_contagemRep[df_contagemRep.media_final.notnull()]
colunas_1 = ['descricao', 'discente', 'media_final', 'id_turma', 'nome']
df_contagemRep = df_contagemRep[colunas_1].drop_duplicates()
df_contagemRep = df_contagemRep[df_contagemRep['nome'].isin(lista_disciplinas)]
df_contagemRep = df_contagemRep.groupby(['discente']).descricao.count().reset_index()
# Students who were approved and never failed
series_Rep = df_contagemRep['discente']
df_NRep = dataFrame[dataFrame['descricao'].str.contains('APROVADO')]
# removing the students who failed
df_NRep = df_NRep[~df_NRep['discente'].isin(series_Rep)]
df_NRep = df_NRep[df_NRep.media_final.notnull()]
colunas_2 = ['descricao', 'discente', 'media_final', 'id_turma', 'nome']
df_NRep = df_NRep[colunas_2].drop_duplicates()
df_NRep = df_NRep[df_NRep['nome'].isin(lista_disciplinas)]
# combine approved (APROVADO) and failed (REPROVADO) records
aprovados = pd.DataFrame()
aprovados['discente'] = df_NRep['discente']
aprovados['descricao'] = df_NRep['descricao']
aprovados = aprovados.replace('APROVADO', 0)
aprovados = aprovados.replace('APROVADO POR NOTA', 0)
df_contagem = pd.concat([df_contagemRep, aprovados])
colunas = ['discente', 'nome', 'media_final']
# dropping duplicates and NaN
grafico = dataFrame[colunas].drop_duplicates().dropna()
grafico = grafico[grafico['nome'].isin(lista_disciplinas)]
df_grafico = pd.crosstab(grafico.discente, grafico.nome, grafico.media_final, aggfunc=np.max).reset_index()
df_grafico = | pd.merge(df_grafico, df_contagem, on='discente', how='left') | pandas.merge |
import pandas as pd
import itertools
import numpy as np
import os
def reconfigure_timeseries(timeseries, offset_column, feature_column=None, test=False):
if test:
timeseries = timeseries.iloc[0:5000] # for testing purposes
timeseries.set_index(['patientunitstayid', pd.to_timedelta(timeseries[offset_column], unit='T')], inplace=True)
timeseries.drop(columns=offset_column, inplace=True)
if feature_column is not None:
timeseries = timeseries.pivot_table(columns=feature_column, index=timeseries.index)
# convert index to multi-index with both patients and timedelta stamp
timeseries.index = pd.MultiIndex.from_tuples(timeseries.index, names=['patient', 'time'])
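# result: rows indexed by (patient, time offset); when feature_column is given, one column per distinct feature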
return timeseries
def resample_and_mask(timeseries, eICU_path, header, mask_decay=True, decay_rate = 4/3, test=False,
verbose=False):
if verbose:
print('Resampling to 1 hour intervals...')
# take the mean of any duplicate index entries for unstacking
timeseries = timeseries.groupby(level=[0, 1]).mean()
# put patient into columns so that we can round the timedeltas to the nearest hour and take the mean in the time interval
unstacked = timeseries.unstack(level=0)
del (timeseries)
unstacked.index = unstacked.index.ceil(freq='H')
resampled = unstacked.resample('H', closed='right', label='right').mean()
del (unstacked)
# store which values had to be imputed
if mask_decay:
if verbose:
print('Calculating mask decay features...')
mask_bool = resampled.notnull()
mask = mask_bool.astype(int)
mask.replace({0: np.nan}, inplace=True) # so that forward fill works
inv_mask_bool = ~mask_bool
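# count the consecutive missing hours since the last real measurement,
# then let the mask weight decay as 1 / (decay_rate * gap) instead of dropping straight to 0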
count_non_measurements = inv_mask_bool.cumsum() - \
inv_mask_bool.cumsum().where(mask_bool).ffill().fillna(0)
mask = mask.ffill().fillna(0) / (count_non_measurements * decay_rate).replace(0, 1)
mask = mask.iloc[-24:]
del (mask_bool, inv_mask_bool, count_non_measurements)
else:
if verbose:
print('Calculating binary mask features...')
mask = resampled.iloc[-24:].notnull()
mask = mask.astype(int)
if verbose:
print('Filling missing data forwards...')
# carry forward missing values (note they will still be 0 in the nulls table)
resampled = resampled.fillna(method='ffill').iloc[-24:]
# simplify the indexes of both tables
resampled.index = list(range(1, 25))
mask.index = list(range(1, 25))
if verbose:
print('Filling in remaining values with zeros...')
resampled.fillna(0, inplace=True)
if verbose:
print('Reconfiguring and combining features with mask features...')
# pivot the table around to give the final data
resampled = resampled.stack(level=1).swaplevel(0, 1).sort_index(level=0)
mask = mask.stack(level=1).swaplevel(0, 1).sort_index(level=0)
# rename the columns in pandas for the mask so it doesn't complain
mask.columns = [str(col) + '_mask' for col in mask.columns]
# merge the mask with the features
final = pd.concat([resampled, mask], axis=1)
if verbose:
print('Saving progress...')
# save to csv
if test is False:
final.to_csv(eICU_path + 'preprocessed_timeseries.csv', mode='a', header=header)
return
def gen_patient_chunk(patients, merged, size=500):
it = iter(patients)
chunk = list(itertools.islice(it, size))
while chunk:
yield merged.loc[chunk]
chunk = list(itertools.islice(it, size))
def gen_timeseries_file(eICU_path, test=False):
print('==> Loading data from timeseries files...')
timeseries_lab = pd.read_csv(eICU_path + 'timeserieslab.csv')
timeseries_resp = pd.read_csv(eICU_path + 'timeseriesresp.csv')
timeseries_periodic = pd.read_csv(eICU_path + 'timeseriesperiodic.csv')
timeseries_aperiodic = pd.read_csv(eICU_path + 'timeseriesaperiodic.csv')
print('==> Reconfiguring lab timeseries...')
timeseries_lab = reconfigure_timeseries(timeseries_lab,
offset_column='labresultoffset',
feature_column='labname',
test=test)
timeseries_lab.columns = timeseries_lab.columns.droplevel()
print('==> Reconfiguring respiratory timeseries...')
# get rid of % signs (found in FiO2 section) and then convert into numbers
timeseries_resp = timeseries_resp.replace('%', '', regex=True)
timeseries_resp['respchartnumeric'] = [float(value) for value in timeseries_resp.respchartvalue.values]
timeseries_resp.drop(columns='respchartvalue', inplace=True)
timeseries_resp = reconfigure_timeseries(timeseries_resp,
offset_column='respchartoffset',
feature_column='respchartvaluelabel',
test=test)
timeseries_resp.columns = timeseries_resp.columns.droplevel()
print('==> Reconfiguring aperiodic timeseries...')
timeseries_aperiodic = reconfigure_timeseries(timeseries_aperiodic,
offset_column='observationoffset',
test=test)
print('==> Reconfiguring periodic timeseries...')
timeseries_periodic = reconfigure_timeseries(timeseries_periodic,
offset_column='observationoffset',
test=test)
print('==> Combining data together...')
merged = timeseries_lab.append(timeseries_resp, sort=False)
merged = merged.append(timeseries_periodic, sort=False)
merged = merged.append(timeseries_aperiodic, sort=True)
print('==> Normalising...')
# most, if not all, of the features are not normally distributed, so use robust quantile scaling
quantiles = merged.quantile([0.05, 0.95])
# subtract a robust 'minimum' (5th percentile) and divide by a robust 'maximum' (95th percentile) so that outliers have little influence
merged -= quantiles.loc[0.05]
merged /= quantiles.loc[0.95]
patients = merged.index.unique(level=0)
gen_chunks = gen_patient_chunk(patients, merged)
header = True # for the first chunk include the header in the csv file
i = 500
print('==> Initiating main processing loop...')
for patient_chunk in gen_chunks:
resample_and_mask(patient_chunk, eICU_path, header, mask_decay=True, decay_rate=4/3,
test=test, verbose=False)
print('==> Processed ' + str(i) + ' patients...')
i += 500
header = False
return
def add_time_of_day(processed_timeseries, flat_features):
print('==> Adding time of day features...')
processed_timeseries = processed_timeseries.join(flat_features[['hour']], how='inner', on='patient')
processed_timeseries['hour'] = processed_timeseries['time'] + processed_timeseries['hour']
hour_list = np.linspace(0,1,24) # make sure it's still scaled well
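# time + admission hour can exceed 24; hour_list[x - 24] relies on negative indexing to wrap around, effectively hour of day mod 24 rescaled to [0, 1]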
processed_timeseries['hour'] = processed_timeseries['hour'].apply(lambda x: hour_list[x - 24])
return processed_timeseries
def further_processing(eICU_path, test=False):
processed_timeseries = | pd.read_csv(eICU_path + 'preprocessed_timeseries.csv') | pandas.read_csv |
#-*- coding: utf-8 -*-
#!usr/bin/env python
"""
SPACE GROUP a-CNN
filename: space_group_a_CNN.py version: 1.0
dependencies:
autoXRD version 1.0
autoXRD_vis version 0.2
Code to perform classification of XRD patterns into various space groups using
physics-informed data augmentation and an all-convolutional neural network (a-CNN).
Code to plot class activation maps from the a-CNN and its global average pooling layer.
@authors: <NAME> and <NAME>
MIT Photovoltaics Laboratory / Singapore and MIT Alliance for Research and Technology
All code is under the Apache 2.0 license; please cite any use of the code as explained
in the README.rst file in the GitHub repository.
"""
#################################################################
#Libraries and dependencies
#################################################################
# Loads series of functions for preprocessing and data augmentation
from autoXRD import *
# Loads CAMs visualizations for a-CNN
from autoXRD_vis import *
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn import metrics
from sklearn.model_selection import KFold
# Neural networks uses Keran with TF background
import keras as K
from keras.models import Model
from keras.models import Sequential
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import EarlyStopping
import tensorflow as tf
# Clear Keras and TF session, if run previously
K.backend.clear_session()
tf.reset_default_graph()
# Training Parameters
BATCH_SIZE=128
# Network Parameters
n_input = 1200 # Total angles in XRD pattern
n_classes = 7 # Number of space-group classes
filter_size = 2
kernel_size = 10
################################################################
# Load data and preprocess
################################################################
# Load simulated and anonimized dataset
import os
dirname = os.path.dirname(__file__)
theor = pd.read_csv(os.path.join(dirname, 'Datasets/theor.csv'), index_col=0)
theor = theor.iloc[1:,]
theor_arr=theor.values
# Normalize data for training
ntheor = normdata(theor_arr)
# Load labels for simulated data
label_theo = pd.read_csv(os.path.join(dirname, 'Datasets/label_theo.csv'), header=None, index_col=0)
label_theo = label_theo[1].tolist()
# Load experimental data as dataframe
exp_arr_new = pd.read_csv(os.path.join(dirname, 'Datasets/exp.csv'), index_col=0)
exp_arr_new = exp_arr_new.values
# Load experimental class labels
label_exp= pd.read_csv(os.path.join(dirname, 'Datasets/label_exp.csv'), index_col=0).values
label_exp = label_exp.reshape([len(label_exp),])
# Load class encoding
space_group_enc = pd.read_csv(os.path.join(dirname, 'Datasets/encoding.csv'), index_col=0)
space_group_enc = list(space_group_enc['0'])
# Normalize experimental data
nexp = normdata(exp_arr_new)
# Define spectral range for data augmentation
exp_min = 0
exp_max = 1200
theor_min = 125
#window size for experimental data extraction
window = 20
theor_max = theor_min+exp_max-exp_min
# Preprocess experimental data
post_exp = normdatasingle(exp_data_processing (nexp, exp_min, exp_max, window))
################################################################
# Perform data augmentation
################################################################
# Specify how many augmented data points to generate
th_num = 2000
# Augment data, this may take a bit
augd,pard,crop_augd = augdata(ntheor, th_num, label_theo, theor_min, theor_max)
# Encode theoretical labels
label_t=np.zeros([len(pard),])
for i in range(len(pard)):
label_t[i]=space_group_enc.index(pard[i])
# Input the number of experimental data points
exp_num =88
# Prepare experimental arrays for training and testing
X_exp = np.transpose(post_exp[:,0:exp_num])
y_exp = label_exp[0:exp_num]
# Prepare simulated arrays for training and testing
X_th = np.transpose(crop_augd )
y_th = label_t
################################################################
# Perform training and cross-validation
################################################################
fold = 5 # Number of k-folds
k_fold = KFold(n_splits=fold, shuffle=True, random_state=3)
# Create arrays to populate metrics
accuracy_exp = np.empty((fold,1))
accuracy_exp_b = np.empty((fold,1))
accuracy_exp_r1 = np.empty((fold,1))
accuracy_exp_p1 = np.empty((fold,1))
accuracy_exp_r2 = np.empty((fold,1))
accuracy_exp_p2 = np.empty((fold,1))
f1=np.empty((fold,1))
f1_m=np.empty((fold,1))
# Create auxiliary arrays
accuracy=[]
logs=[]
ground_truth=[]
predictions_ord=[]
trains=[]
tests=[]
trains_combine=[]
trains_y=[]
# Run cross validation and define a-CNN each time in loop
for k, (train, test) in enumerate(k_fold.split(X_exp, y_exp)):
#Save splits for later use
trains.append(train)
tests.append(test)
#Data augmentation of the experimental training dataset; the
# experimental test fold has already been held out
temp_x = X_exp[train]
temp_y = y_exp[train]
exp_train_x,exp_train_y = exp_augdata(temp_x.T,5000,temp_y)
# Combine theoretical and experimenal dataset for training
train_combine = np.concatenate((X_th,exp_train_x.T))
trains_combine.append(train_combine)
# Clear weights and networks state
K.backend.clear_session()
# Network Parameters
BATCH_SIZE=128
n_input = 1200 # Total angles in XRD pattern
n_classes = 7 # Number of space-group classes
filter_size = 2
kernel_size = 10
enc = OneHotEncoder(sparse=False)
train_dim = train_combine.reshape(train_combine.shape[0],1200,1)
train_y = np.concatenate((y_th,exp_train_y))
trains_y.append(train_y)
train_y_hot = enc.fit_transform(train_y.reshape(-1,1))
# Define network structure
model = Sequential()
model.add(K.layers.Conv1D(32, 8,strides=8, padding='same',input_shape=(1200,1), activation='relu'))
model.add(K.layers.Conv1D(32, 5,strides=5, padding='same', activation='relu'))
model.add(K.layers.Conv1D(32, 3,strides=3, padding='same', activation='relu'))
model.add(K.layers.pooling.GlobalAveragePooling1D())
model.add(K.layers.Dense(n_classes, activation='softmax'))
#Define optimizer
optimizer = K.optimizers.Adam()
# Compile model
model.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['categorical_accuracy'])
# Choose early stop
#early_stop = EarlyStopping(monitor='val_loss', patience=50, verbose=1, restore_best_weights=True)
# Reduce learning rate during optimization
# reduce_lr = ReduceLROnPlateau(monitor = 'loss', factor=0.5,
# patience=50, min_lr=0.00001)
# Define test data
test_x = X_exp[test]
test_x = test_x.reshape(test_x.shape[0],1200,1)
test_y = enc.fit_transform(y_exp.reshape(-1,1))[test]
# Fit model
hist = model.fit(train_dim, train_y_hot, batch_size=BATCH_SIZE, epochs=100,
verbose=1, validation_data=(test_x, test_y))
# hist = model.fit(train_dim, train_y_hot, batch_size=BATCH_SIZE, nb_epoch=100,
# verbose=1, validation_data=(test_x, test_y), callbacks = [early_stop])
#
#Compute model predictions
prediction=model.predict(test_x)
#Go from one-hot to ordinal...
prediction_ord=[np.argmax(element) for element in prediction]
predictions_ord.append(prediction_ord)
# Compute accuracy, recall, precision and F1 with macro and micro averaging
accuracy_exp[k] = metrics.accuracy_score(y_exp[test], prediction_ord)
accuracy_exp_r1[k] = metrics.recall_score(y_exp[test], prediction_ord, average='macro')
accuracy_exp_r2[k] = metrics.recall_score(y_exp[test], prediction_ord, average='micro')
accuracy_exp_p1[k] = metrics.precision_score(y_exp[test], prediction_ord, average='macro')
accuracy_exp_p2[k] = metrics.precision_score(y_exp[test], prediction_ord, average='micro')
f1[k]=metrics.f1_score(y_exp[test], prediction_ord, average='micro')
f1_m[k]=metrics.f1_score(y_exp[test], prediction_ord, average='macro')
#Produce ground_truth: each list element contains an array with the test indices (w.r.t. X_exp) in the first column and the true labels in the second
ground_truth.append(np.concatenate([test.reshape(len(test),1),y_exp[test].reshape(len(test),1)],axis=1))
#Compute loss and accuracy for each k validation
accuracy.append(model.evaluate(test_x, test_y, verbose=0))
#Save logs
log = pd.DataFrame(hist.history)
logs.append(log)
#Save models on current folder with names subscripts 0 to 4
model.save(os.path.join(dirname, 'keras_model')+str(k)+'.h5')
#
accuracy = np.array(accuracy)
# Print final cross validation accuracy
print ('Mean Cross-val accuracy', np.mean(accuracy[:,1]))
################################################################
# Plotting Class Activation Maps
################################################################
# Compute correctly classified and incorrectly classified cases
corrects, incorrects = find_incorrects(ground_truth,predictions_ord)
# Get dataframe of all incorrects and dataframe of all corrects
corrects_df = | pd.concat([r for r in corrects], ignore_index=False, axis=0) | pandas.concat |
import json
import multiprocessing as mp
import click
from fuzzywuzzy import fuzz
import numpy as np
import pandas as pd
RESULT_PATH = '../results'
BB_PATH = '../storage'
MSD_PATH = '../storage'
# Uses Click: a package for creating command line interfaces
@click.group()
@click.option(
'--path', default='.', help='The path where the results are stored.')
def cli(path):
global RESULT_PATH
RESULT_PATH = path
def main1():
msd_track_duplicates()
def main2():
msd = read_msd_unique_tracks()
year = read_msd_tracks_per_year()[['msd_id', 'year']]
billboard = read_billboard_tracks()
features = read_msd_feature_files()
msd = join(msd, year, on=['msd_id'])
msd = join(msd, features, on=['msd_id'])
matches = join(msd, billboard, on=['artist', 'title'])
duplicates = matches[matches.duplicated(
subset=['artist', 'title'], keep=False)]
duplicates.to_csv(RESULT_PATH + '/msd_bb_matches_duplicates.csv')
results = join(msd, billboard, on=['artist', 'title'], how='left')
duplicates = results[results.duplicated(
subset=['artist', 'title'], keep=False)]
duplicates.to_csv(RESULT_PATH + '/msd_bb_all_duplicates.csv')
@cli.command()
def match():
msd = read_msd_unique_tracks()
year = read_msd_tracks_per_year()[['msd_id', 'year']]
billboard = read_billboard_tracks()
features = read_msd_feature_files()
msd = join(msd, year, on=['msd_id'])
msd = join(msd, features, on=['msd_id'])
matches = join(msd, billboard, on=['artist', 'title'])
keep_first_duplicate(matches)
matches.to_csv(RESULT_PATH + '/msd_bb_matches.csv')
results = join(msd, billboard, on=['artist', 'title'], how='left')
keep_first_duplicate(results)
results.to_csv(RESULT_PATH + '/msd_bb_all.csv')
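# split into chunks so the fuzzy matching below can run in parallel, one chunk per worker task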
df_split = np.array_split(results, mp.cpu_count() * 4)
with mp.Pool() as pool:
result_entries = pool.imap_unordered(_fuzzy_match, df_split)
fuzzy_results = pd.DataFrame(
columns=list(msd.columns) + ['max_sim', 'artist_sim', 'title_sim'])
for result in result_entries:
fuzzy_results = fuzzy_results.append(
result, ignore_index=True, sort=False)
fuzzy_results.to_csv(RESULT_PATH + '/msd_bb_fuzzy_matches.csv')
fuzzy_results = fuzzy_results.loc[fuzzy_results['title_sim'] <= 40]
fuzzy_results = fuzzy_results[[
'msd_id', 'echo_nest_id', 'artist', 'title', 'year'
]]
fuzzy_results.to_csv(RESULT_PATH + '/msd_bb_non_matches.csv')
@cli.command()
def combine_lowlevel_features():
features = _combine_features(_combine_ll_features)
features.to_hdf(RESULT_PATH + '/msd_bb_ll_features.h5', 'll')
@cli.command()
def combine_highlevel_features():
features = _combine_features(_combine_hl_features)
features.to_hdf(RESULT_PATH + '/msd_bb_hl_features.h5', 'hl')
def _combine_features(combine_function):
hits = set(read_hits()['msd_id'])
non_hits = set(read_non_hits()['msd_id'])
msd_ids = hits | non_hits
all_features = pd.DataFrame()
df_split = np.array_split(list(msd_ids), mp.cpu_count() * 4)
with mp.Pool() as pool:
features = pool.imap_unordered(combine_function, df_split)
for feature in features:
all_features = all_features.append(
feature, sort=False, ignore_index=True)
return all_features
def _combine_ll_features(msd_ids):
features_path = MSD_PATH # noqa E501
ll_features = pd.DataFrame()
for msd_id in msd_ids:
try:
file_id = pd.DataFrame([msd_id], columns=['msd_id'])
feature = pd.io.json.json_normalize(
_get_lowlevel_feature(features_path, msd_id))
ll_features = ll_features.append(
file_id.join(feature), sort=False, ignore_index=True)
except FileNotFoundError as error:
print(error)
return ll_features
def _combine_hl_features(msd_ids):
features_path = MSD_PATH # noqa E501
hl_features = pd.DataFrame()
for msd_id in msd_ids:
try:
file_id = pd.DataFrame([msd_id], columns=['msd_id'])
feature = pd.io.json.json_normalize(
_get_highlevel_feature(features_path, msd_id))
hl_features = hl_features.append(
file_id.join(feature), sort=False, ignore_index=True)
except FileNotFoundError as error:
print(error)
return hl_features
def _fuzzy_match(msd):
billboard = read_billboard_tracks()
results = pd.DataFrame(
columns=list(msd.columns) + ['max_sim', 'artist_sim', 'title_sim'])
for _, row_msd in msd.iterrows():
entry = {
**row_msd,
'max_sim': 0,
}
for _, row_bb in billboard.iterrows():
artist_sim, title_sim = fuzz.ratio(
row_msd['artist'], row_bb['artist']), fuzz.ratio(
row_msd['title'], row_bb['title'])
sim = fuzz.ratio(row_msd['artist'] + '|#|' + row_msd['title'],
row_bb['artist'] + '|#|' + row_bb['title'])
if sim > entry['max_sim']:
entry['max_sim'] = sim
entry['artist_sim'] = artist_sim
entry['title_sim'] = title_sim
entry['peak'] = row_bb['peak']
entry['weeks'] = row_bb['weeks']
entry = pd.Series(entry)
results = results.append(entry, ignore_index=True)
return results
def keep_first_duplicate(data):
data.drop_duplicates(
subset=['artist', 'title'], keep='first', inplace=True)
def remove_duplicates(data):
data.drop_duplicates(subset=['artist', 'title'], keep=False, inplace=True)
data.drop_duplicates(subset=['echo_nest_id'], keep=False, inplace=True)
def match_and_store_datasets(left,
right,
output_file,
how='inner',
hdf=None,
key='data'):
combined = join(left, right, on=['artist', 'title'], how=how)
if hdf:
combined.to_hdf(output_file, key=key)
else:
combined.to_csv(output_file)
def join(left, right, on, how='inner'):
return pd.merge(left, right, how=how, left_on=on, right_on=on)
def bb_track_duplicates():
bb = read_billboard_tracks()
tracks = bb.groupby(['artist', 'title'])
for index, group in tracks:
group_cnt = group.count()['peak']
if group_cnt > 1:
print(index, group_cnt)
def msd_track_duplicates():
msd = read_msd_unique_tracks()
unique_file_count = len(set(msd['msd_id']))
unique_id_count = len(set(msd['echo_nest_id']))
print(str(unique_file_count) + ',' + str(unique_id_count))
tracks = msd.groupby(['artist', 'title'])
count = 0
for index, group in tracks:
group_cnt = group.count()['msd_id']
if group_cnt > 1:
for item in group['msd_id']:
output_line = item + ',' + index
print(output_line)
count += 1
print(len(tracks), count)
def _get_highlevel_feature(features_path, msd_id):
file_suffix = '.mp3.highlevel.json'
return _load_feature(features_path, msd_id, file_suffix)
def _get_lowlevel_feature(features_path, msd_id):
file_suffix = '.mp3'
return _load_feature(features_path, msd_id, file_suffix)
def _load_feature(features_path, msd_id, file_suffix):
file_prefix = '/features_tracks_' + msd_id[2].lower() + '/'
file_name = features_path + file_prefix + msd_id + file_suffix
with open(file_name) as features:
return json.load(features)
def read_msd_tracks_per_year():
file_path = MSD_PATH + '/additional_files/tracks_per_year.txt'
return pd.read_csv(
file_path,
sep='<SEP>',
header=None,
names=['year', 'msd_id', 'artist', 'title'])
def read_msd_unique_artists():
file_path = MSD_PATH + '/additional_files/unique_tracks.txt'
return pd.read_csv(
file_path,
sep='<SEP>',
header=None,
names=['artist_id', 'mb_artist_id', 'msd_id', 'artist'])
def read_msd_unique_tracks():
file_path = MSD_PATH + '/additional_files/unique_tracks.txt'
return pd.read_csv(
file_path,
sep='<SEP>',
header=None,
names=['msd_id', 'echo_nest_id', 'artist', 'title'])
def read_msd_feature_files():
file_path = MSD_PATH + '/file_ids.csv'
return | pd.read_csv(file_path, header=None, names=['msd_id']) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 14:51:38 2020
@author: haukeh
"""
#%%Import of required packages
import numpy as np
import pandas as pd
import os
import sys
import plotly.graph_objs as go
from plotly.offline import plot
#%% Function to read results csv files
def read_csv(scen, param):
df = pd.read_csv('{}/results_csv/{}.csv'.format(scen,param))
df['pathway'] = scen
return df
#%% Function to create a nested dictionary: one entry per scenario, each holding the result dataframes per parameter
def build_dic(scens, params):
dic = {}
for scen in scens:
dic[scen] = {}
for scen in scens:
for param in params:
dic[scen][param] = read_csv(scen, param)
return dic
#%% Function to create a df with the annual production by technology
def build_PbTA_df(dic):
# dic = results_dic
df = pd.DataFrame(columns=['REGION','TECHNOLOGY','FUEL','YEAR','VALUE','pathway'])
for i in dic:
df_work = dic[i]['ProductionByTechnologyAnnual']
df = df.append(df_work)
df['region'] = df['TECHNOLOGY'].apply(lambda x: x[:2])
df['fuel'] = df['TECHNOLOGY'].apply(lambda x: x[2:4])
df['tech_type'] = df['TECHNOLOGY'].apply(lambda x: x[4:6])
df['tech_spec'] = df['TECHNOLOGY'].apply(lambda x: x[2:])
df = df[(df['fuel']!='OI')
&(df['tech_type']!='00')
&((df['YEAR']==2015)|(df['YEAR']==2020)|(df['YEAR']==2030)|(df['YEAR']==2040)|(df['YEAR']==2050))]
df['unit'] = 'PJ'
return df
#%% Function to create a dictionary with basic facts about the results (pathways, regions, unit)
def get_facts(df):
facts_dic = {}
facts_dic['pathways'] = df.loc[:,'pathway'].unique()
facts_dic['regions'] = df.loc[:,'region'].unique()
facts_dic['unit'] = df.loc[:, 'unit'].unique()
facts_dic['regions'] = np.append(facts_dic['regions'],'EU28')
return facts_dic
#%% Dictionary of dictionaries with colour schemes
colour_schemes = dict(
dES_colours = dict(
Coal = 'rgb(0, 0, 0)',
Oil = 'rgb(121, 43, 41)',
Gas = 'rgb(86, 108, 140)',
Nuclear = 'rgb(186, 28, 175)',
Waste = 'rgb(138, 171, 71)',
Biomass = 'rgb(172, 199, 119)',
Biofuel = 'rgb(79, 98, 40)',
Hydro = 'rgb(0, 139, 188)',
Wind = 'rgb(143, 119, 173)',
Solar = 'rgb(230, 175, 0)',
Geo = 'rgb(192, 80, 77)',
Ocean ='rgb(22, 54, 92)',
Imports = 'rgb(232, 133, 2)'),
TIMES_PanEU_colours = dict(
Coal = 'rgb(0, 0, 0)',
Oil = 'rgb(202, 171, 169)',
Gas = 'rgb(102, 77, 142)',
Nuclear = 'rgb(109, 109, 109)',
Waste = 'rgb(223, 134, 192)',
Biomass = 'rgb(80, 112, 45)',
Biofuel = 'rgb(178, 191, 225)',
Hydro = 'rgb(181, 192, 224)',
Wind = 'rgb(103, 154, 181)',
Solar = 'rgb(210, 136, 63)',
Geo = 'rgb(178, 191, 225)',
Ocean ='rgb(178, 191, 225)',
Imports = 'rgb(232, 133, 2)')
)
#%% functions for returning positives and negatives
def positives(value):
return max(value, 0)
def negatives(value):
return min(value, 0)
#%% Function to create dfs with import and export of electricity for selected country
def impex(data, paths, selected_country):
df_filtered = data[(data['fuel']=='EL')
&((data['region']==selected_country)|(data['tech_type']==selected_country))
&(data['tech_type']!='00')]
countries = []
countries = list(df_filtered['region'].unique())
countries.extend(df_filtered['tech_type'].unique())
countries = list(dict.fromkeys(countries))
df_filtered = df_filtered[df_filtered['FUEL'].str.contains('|'.join(countries))]
df_filtered = df_filtered[df_filtered['FUEL'].str.contains('E1')]
years = pd.Series(df_filtered['YEAR'].unique(),name='YEAR').sort_values()
#paths = list(path_names.keys())
neighbours = []
for i in countries:
if i != selected_country:
neighbours.append(i)
dict_path = {}
links = list(df_filtered['TECHNOLOGY'].unique())
label_imp = []
label_exp = []
for n in neighbours:
label_imp.append('Import from '+n)
label_exp.append('Export to '+n)
for j in paths:
i = 0
net_imp = pd.DataFrame(index=years)
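# for every transmission link: net imports into the selected country = imports minus exports, reindexed to the common set of years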
for link in links:
imp = df_filtered[(df_filtered['pathway']==j)
&(df_filtered['TECHNOLOGY']==link)
&(df_filtered['FUEL']==(selected_country+'E1'))]
if len(imp.index)<5:
imp = imp.set_index('YEAR').reindex(years).reset_index().fillna(0)
imp = imp.set_index(years)
exp = df_filtered[(df_filtered['pathway']==j)
&(df_filtered['TECHNOLOGY']==link)
&(df_filtered['FUEL']==(neighbours[i]+'E1'))]
if len(exp.index)<5:
exp = exp.set_index('YEAR').reindex(years).reset_index().fillna(0)
exp = exp.set_index(years)
net_imp[link] = imp['VALUE'] - exp['VALUE']
i += 1
net_imp_pos = pd.DataFrame(index=years,columns=links)
net_imp_neg = | pd.DataFrame(index=years,columns=links) | pandas.DataFrame |
import numpy as np
import pandas as pd
from ml_recsys_tools.data_handlers.interactions_with_features import ItemsHandler, ObsWithFeatures
from ml_recsys_tools.recommenders.recommender_base import BaseDFSparseRecommender
from ml_recsys_tools.utils.geo import ItemsGeoMap, PropertyGeoMap
class ItemsGeoMapper:
score_col = '_score'
def __init__(self, items_handler: ItemsHandler, map: ItemsGeoMap):
self.map = map
self.items_handler = items_handler
def join_scores_list(self, items_df, item_ids, scores):
return items_df. \
set_index(self.items_handler.item_id_col). \
join(pd.Series(scores, index=item_ids, name=self.score_col)). \
reset_index().\
sort_values(self.score_col, ascending=False)
def _scale_scores_as_marker_sizes(self, scores, min_size=5):
scale_scores = np.array(scores) ** 2
scale_scores -= scale_scores.min()
scale_scores *= 10 / scale_scores.max()
scale_scores += min_size
scale_scores = [int(v) for v in scale_scores]
return scale_scores
def _assign_data_to_map(self, df):
df = df.set_index(self.items_handler.item_id_col).drop_duplicates()
self.map.df_items = df
def map_recommendations(self, train_ids, reco_ids, scores,
test_ids=(), **kwargs):
return self.map_items_and_scores(
source_ids=train_ids,
result_ids=reco_ids,
test_ids=test_ids,
result_scores=scores,
**kwargs)
def map_similar_items(self, source_id, similar_ids, scores, **kwargs):
return self.map_items_and_scores(
source_ids=[source_id],
result_ids=similar_ids,
result_scores=scores,
**kwargs)
def map_items_and_scores(self, source_ids, result_ids, result_scores,
test_ids=(), color=(210, 20, 210), marker_size=5):
return self.map_recommendation_variants(
train_ids=source_ids,
test_ids=test_ids,
recs_variants=[result_ids],
scores_lists=[result_scores],
colors=[color],
marker_size=marker_size)
def map_recommendation_variants(
self, train_ids, test_ids, recs_variants,
colors=None, scores_lists=None, marker_size=5):
self.map.reset_map()
train_items = self.items_handler.items_filtered_by_ids(train_ids)
test_items = self.items_handler.items_filtered_by_ids(test_ids)
rec_items_dfs = [self.items_handler.items_filtered_by_ids(rec) for rec in recs_variants]
all_data = pd.concat(rec_items_dfs + [train_items, test_items], sort=False)
self._assign_data_to_map(all_data)
if scores_lists is None:
scores_lists = [None] * len(recs_variants)
if colors is None:
colors = self.map.get_n_spaced_colors(len(recs_variants))
for rec_items, rec_ids, scores, color in zip(rec_items_dfs, recs_variants, scores_lists, colors):
scale_scores = None
if scores is not None:
rec_items = self.join_scores_list(rec_items,
item_ids=rec_ids,
scores=scores)
scale_scores = self._scale_scores_as_marker_sizes(
scores=rec_items[self.score_col].fillna(0).values, min_size=marker_size)
self.map.add_markers(rec_items, color=color, size=scale_scores or marker_size, fill=False)
self.map.add_heatmap(rec_items, color=color, sensitivity=1, opacity=0.4, spread=50)
if len(train_items):
self.map.add_markers(train_items, color='black', size=6)
if len(test_items):
self.map.add_markers(test_items, color='gray', size=6)
return self.map
class ObsItemsGeoMapper(ItemsGeoMapper):
def __init__(self, obs_handler: ObsWithFeatures, map: ItemsGeoMap):
super().__init__(map=map, items_handler=obs_handler)
self.obs_handler = obs_handler
def map_items_for_user(self, user):
self.map.df_items = self.obs_handler.get_items_df_for_user(user)
return self.map
def map_items_by_common_items(self, item_id, default_marker_size=2):
self.map.reset_map()
users = self.obs_handler.items_filtered_df_obs(item_id)[self.obs_handler.uid_col].unique().tolist()
items_dfs = [self.obs_handler.get_items_df_for_user(user) for user in users]
# unite all data and get counts
all_data = pd.concat(items_dfs, sort=False)
counts = all_data[self.obs_handler.item_id_col].value_counts()
all_data = all_data.set_index(self.obs_handler.item_id_col).drop_duplicates()
all_data['counts'] = counts
all_data.reset_index(inplace=True)
self._assign_data_to_map(all_data)
# add maps for each user's history
colors = iter(self.map.get_n_spaced_colors(len(items_dfs)))
for df in items_dfs:
color = next(colors)
self.map.add_markers(df, color=color, size=default_marker_size)
self.map.add_heatmap(df, color=color, sensitivity=1, opacity=0.4)
# add common items
common_items = all_data[all_data['counts'].values > 1]
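# marker size grows with the square root of how many user histories contain the item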
sizes = list(map(int, np.sqrt(common_items['counts'].values) + default_marker_size))
self.map.add_markers(common_items, color='white', size=sizes)
return self.map
def map_cluster_labels(self, df_items=None, sample_n=1000):
self.map.reset_map()
if df_items is None:
df_items = self.obs_handler.df_items
unique_labels = df_items[self.obs_handler.cluster_label_col].unique()
items_dfs = [df_items[df_items[self.obs_handler.cluster_label_col] == label].sample(sample_n)
for label in unique_labels]
self._assign_data_to_map( | pd.concat(items_dfs, sort=False) | pandas.concat |
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, delta, box):
# only test adding/sub offsets as + is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + delta
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, delta, box):
# only test adding/sub offsets as - is now numeric
if box is pd.DataFrame and isinstance(delta, pd.DateOffset):
pytest.xfail(reason="Returns object dtype instead of m8[ns]")
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="Index fails to return "
"NotImplemented on "
"reverse op",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box_df_fail):
# GH#18849
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box_df_fail):
# GH#18824, GH#19744
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_df_fail):
# GH#18824
box = box_df_fail # tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="object dtype Series "
"fails to return "
"NotImplemented",
strict=True, raises=TypeError)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box):
# GH#18849
box2 = Series if box is pd.Index else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_df_fail):
# GH#18824
box = box_df_fail # DataFrame tries to broadcast incorrectly
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_df_fail):
box = box_df_fail # DataFrame op returns object instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, delta, box):
if box is pd.DataFrame and not isinstance(delta, pd.DateOffset):
pytest.xfail(reason="returns m8[ns] instead of raising")
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng * delta
def test_tdi_mul_int_array_zerodim(self, box_df_fail):
box = box_df_fail # DataFrame op returns object dtype
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 0.1))
box2 = pd.Series if box is pd.Index else box
expected = tm.box_expected(expected, box2)
result = idx * Series(rng5f + 0.1)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_df_fail):
# RangeIndex fails to return NotImplemented, for others
# DataFrame tries to broadcast incorrectly
box = box_df_fail
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__
def test_td64arr_div_nat_invalid(self, box_df_fail):
# don't allow division by NaT (maybe could in the future)
box = box_df_fail # DataFrame returns all-NaT instead of raising
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box)
with pytest.raises(TypeError):
rng / pd.NaT
def test_td64arr_div_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype instead of m8[ns]
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx / 1
tm.assert_equal(result, idx)
def test_tdi_div_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
def test_tdi_div_tdlike_scalar_with_nat(self, delta, box_df_fail):
box = box_df_fail # DataFrame op returns m8[ns] instead of float64
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng / delta
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_df_fail):
box = box_df_fail # DataFrame returns object dtype
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
result = idx // 1
tm.assert_equal(result, idx)
def test_td64arr_floordiv_tdlike_scalar(self, delta, box_df_fail):
box = box_df_fail # DataFrame returns m8[ns] instead of int64 dtype
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi // delta
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_df_fail):
# GH#19125
box = box_df_fail # DataFrame op returns m8[ns] instead of f8 dtype
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# Operations with invalid others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with tm.assert_raises_regex(TypeError, pattern):
td1 * scalar_td
with tm.assert_raises_regex(TypeError, pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box, one, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object-dtype",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box, two, tdser):
# GH#4521
# divide/multiply by integers
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
result = tdser / two
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('op', [operator.mul, ops.rmul])
def test_td64arr_rmul_numeric_array(self, op, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
# TODO: Make this up-casting more systematic?
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = op(vector, tdser)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box, vector, dtype, tdser):
# GH#4521
# divide/multiply by integers
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box)
box = Series if (box is pd.Index and type(vector) is Series) else box
expected = tm.box_expected(expected, box)
result = tdser / vector
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
vector / tdser
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box, names):
# GH#19042 test for correct name attachment
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_float_series_rdiv_td64arr(self, box, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
expected = Series([tdi[n] / ser[n] for n in range(len(ser))],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser.__rdiv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="raises ValueError "
"instead of TypeError",
strict=True))
])
def test_td64arr_pow_invalid(self, scalar_td, box):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
        td1 = tm.box_expected(td1, box)
import json
import os
import uuid
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from datafetcher.date import date_generator
def fetch_data():
"""
Crawl data using scrapy.
"""
os.system("cd datafetcher && scrapy crawl port_spider -o ./cache/port_data.json")
os.system("cd datafetcher && python3 vessel.py")
os.system("cd datafetcher && python3 company.py")
def dump_json(data, path):
"""
Save JSON object to file.
:param data: dict
:param path: str
"""
with open(path, 'w') as f:
json.dump(data, f)
def load_json(path):
"""
Load JSON object from file.
:param path: str
:return data: dict
"""
with open(path, 'r') as f:
data = json.load(f)
return data
def concatenate(data):
"""
Return all the names as a single list.
:param data: dict
:return combined_data: list
"""
combined_data = []
for i in data:
combined_data += i["names"]
return combined_data
def load_data(cache_dir):
"""
Function for loading data.
:param cache_dir: str
:return port_data, vessel_data, company_data: list
"""
port_data = concatenate(load_json(os.path.join(cache_dir, 'port_data.json')))
vessel_data = load_json(os.path.join(cache_dir, 'vessel_data.json'))['names']
company_data = load_json(os.path.join(cache_dir, 'company_data.json'))['names']
return port_data, vessel_data, company_data
def rand_choose(data, num):
"""
    Randomly select num items from data (sampling with replacement).
:param data: list
:param num: int
:return : list
"""
return [data[i] for i in np.random.choice(np.arange(0, len(data), 1, dtype=np.int16), size=num)]
def generate_dataset(use_cache=True):
"""
Function that combines the individual datasets.
:param use_cache: bool [True]
:return: dict
"""
cache_dir = './datafetcher/cache'
try:
os.makedirs(cache_dir)
except FileExistsError:
pass
if use_cache:
try:
port_data, vessel_data, company_data = load_data(cache_dir)
except FileNotFoundError:
fetch_data()
port_data, vessel_data, company_data = load_data(cache_dir)
else:
fetch_data()
port_data, vessel_data, company_data = load_data(cache_dir)
return {'port': rand_choose(port_data, len(vessel_data)), 'date': date_generator(len(company_data)),
'vessel': vessel_data, 'company': company_data}
def format_dataset(data):
"""
    Function that formats and stores the dataset in the required format.
:param data: dict
"""
identifier = []
label = []
text = []
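    # Flatten the {label: [names]} dict into rows of (guid, label, alpha, text), then split
    # roughly 90% train / 8% dev / 2% labelled test; the unlabelled test frame keeps only
    # guid and text.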
for k, v in data.items():
for i in v:
identifier.append(uuid.uuid4())
label.append(k)
text.append(i)
df = pd.DataFrame({'guid': identifier,
'label': label,
'alpha': ['a'] * len(label),
'text': text})
df_train, _df_test = train_test_split(df, test_size=0.1)
df_dev, df_test_with_label = train_test_split(_df_test, test_size=0.2)
    df_test = pd.DataFrame({'guid': df_test_with_label['guid'], 'text': df_test_with_label['text']})
import xlsxwriter
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from openpyxl import load_workbook
class GeneticAlgorithm:
"""
Class to run a genetic algorithm and create everything around it, e.g., an Excel-file with all results.
"""
def __init__(self, path_to_file, nb_epochs, force_new_init=False):
"""
Constructor for class GeneticAlgorithm.
Args:
path_to_file: Path where to save and filename of the excel-file.
nb_epochs: Number of how many epochs the neural network will be trained.
force_new_init: If True an existing excel-file in the given directory with the same filename
will be overwritten.
"""
self.path = path_to_file
self.nb_conv_layer = 5
self.nb_conv_filter = [8, 16, 32, 64, 128]
self.size_conv_filter = [3]
self.nb_epochs = nb_epochs
self.writer = None # used for writing into the excel workbook
if os.path.exists(self.path) is False or force_new_init is True:
self.__init_excel()
def __init_excel(self):
"""
Initializes an excel-file where to save the genepool and all results.
"""
workbook = xlsxwriter.Workbook(self.path)
worksheet_genomes = workbook.add_worksheet("Genome")
worksheet_genepool = workbook.add_worksheet("Genpool")
workbook.add_worksheet("Overview")
workbook.add_worksheet("Training_Detailed")
self.__init_header_genome(worksheet_genomes)
self.__init_header_genepool(worksheet_genepool)
self.__init_genpool(worksheet_genepool)
workbook.close()
book = load_workbook(self.path)
self.writer = pd.ExcelWriter(self.path, engine='openpyxl')
self.writer.book = book
self.writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
self.__init_training_detailed_sheet()
self.__init_overview()
def get_tf_model(self, model_parameters, input_shape):
"""
Converts a DataFrame with model-parameters into a trainable tensorflow-model.
"""
nb_conv_layer = model_parameters["Number_Conv_Layer"][0]
batch_norm = model_parameters["Batch_Norm"][0]
activation = model_parameters["Activation"][0]
# get correct activation function as TensorFlow-object
if activation == 'relu':
activation_obj = tf.nn.relu
elif activation == 'relu6':
activation_obj = tf.nn.relu6
else:
activation_obj = tf.nn.leaky_relu
# whether to use residual connections or not
residual_connections = model_parameters["Residual_Connections"][0]
# extract some information for each layer
nb_conv_filter = []
filter_sizes = []
max_pools = []
for i in range(1, nb_conv_layer + 1):
nb_conv_filter.append(int(model_parameters["Number_Conv_Filter_" + str(i)][0]))
filter_sizes.append(int(model_parameters["Filter_Size_" + str(i)][0]))
max_pools.append(model_parameters["Max_Pool_" + str(i)][0])
# create our tf-model and add all layers
input_layer = tf.keras.Input(input_shape)
x = input_layer
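        # Build nb_conv_layer conv blocks: a 1x1 conv projects the running tensor to the
        # block's filter count so it can optionally be added back as a residual, followed by
        # a same-padded conv, optional batch norm, optional residual add, the chosen
        # activation, and optional max pooling.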
for i in range(nb_conv_layer):
downsample = tf.keras.Sequential()
downsample.add(tf.keras.layers.Conv2D(filters=nb_conv_filter[i],
kernel_size=(1, 1),
strides=1))
residual = downsample(x)
x = tf.keras.layers.Conv2D(filters=nb_conv_filter[i], kernel_size=filter_sizes[i], padding='same')(x)
if bool(batch_norm) is True:
x = tf.keras.layers.BatchNormalization()(x)
if bool(residual_connections) is True:
x = tf.keras.layers.Add()([residual, x])
x = activation_obj(x)
if bool(max_pools[i]) is True:
x = tf.keras.layers.MaxPooling2D()(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
output = tf.keras.layers.Dense(4, activation='softmax')(x)
model = tf.keras.Model(input_layer, output)
return model
@staticmethod
def __get_random_value(df):
"""
Gets a random value from a DataFrame-column where the value is != -1
e.g. one random value from df=[8, 16, 32, 64, 128, -1, -1, -1,...].
"""
df = np.array(df)
df = np.random.choice(df[df != -1])
return df
def create_random_model_parameters_from_genpool(self):
"""
Loads all possible parameters from the genepool and creates a random model from those.
"""
df = pd.read_excel(self.path, sheet_name="Genpool", engine='openpyxl').fillna(-1)
# extract random parameters from our genpool
random_nb_conv_layer = self.__get_random_value(df['Number_Conv_Layer'])
residual_connections = self.__get_random_value(df['Residual_Connections'])
batch_norm = self.__get_random_value(df['Batch_Norm'])
activation = self.__get_random_value(df['Activation'])
# create arrays to save the parameters which are used inside the for-loop
max_nb_conv_layer = np.array(df['Number_Conv_Layer'])
max_nb_conv_layer = np.max(max_nb_conv_layer[max_nb_conv_layer != -1])
nb_conv_filters = np.zeros(max_nb_conv_layer)
kernel_sizes = np.zeros(max_nb_conv_layer)
max_pools = []
for i in range(max_nb_conv_layer):
filters = self.__get_random_value(df['Number_Conv_Filter_'+str(i+1)])
kernel_size = self.__get_random_value(df['Filter_Size_'+str(i+1)])
max_pool = self.__get_random_value(df['Max_Pool_'+str(i+1)])
# save each parameter for each for-loop-iteration to save it later in "model_parameters"
nb_conv_filters[i] = filters
kernel_sizes[i] = kernel_size
max_pools.append(max_pool)
# save all used parameters into a dataframe to save them in our excel file later
model_parameters = self.__get_model_as_data_frame(random_nb_conv_layer, nb_conv_filters, kernel_sizes,
residual_connections, batch_norm, max_pools, activation)
return model_parameters
def crossover_best_models(self, models_summaries, nb_new_models, n_best_models, prob_mutation=5):
"""
Crossover the n_best_models into nb_new_models new models using the Fitness as criteria.
"""
models_summaries = pd.DataFrame(models_summaries)
models_summaries = models_summaries.sort_values('Fitness', ascending=False)
best_models = models_summaries[0:n_best_models]
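        # Crossover: each gene (number of conv layers, per-layer filters / kernel size /
        # max-pool flag, residual connections, batch norm, activation) is sampled uniformly
        # from the n_best_models fittest parents; with probability prob_mutation percent the
        # gene is instead mutated to a different value drawn from the genepool sheet.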
# load genpool for random mutations
df_genpool = pd.read_excel(self.path, sheet_name="Genpool", engine='openpyxl').fillna(-1)
new_models = []
# create new models
for i in range(nb_new_models):
nb_conv_layer = np.random.choice(best_models["Number_Conv_Layer"])
if np.random.randint(0, 100) < prob_mutation:
# if mutation select a random other value (make sure to not select the previous value)
_df = df_genpool["Number_Conv_Layer"]
_nb_conv_layer = self.__get_random_value(_df.loc[(_df != nb_conv_layer) & (_df != 1)])
print(f"Mutation: {_nb_conv_layer} Conv Layer instead of {nb_conv_layer}")
nb_conv_layer = _nb_conv_layer
max_nb_conv_layer = np.array(df_genpool['Number_Conv_Layer'])
max_nb_conv_layer = np.max(max_nb_conv_layer[max_nb_conv_layer != -1])
nb_conv_filters = []
kernel_sizes = []
max_pools = []
for j in range(1, max_nb_conv_layer+1):
nb_conv_filter = np.random.choice(best_models["Number_Conv_Filter_" + str(j)])
if np.random.randint(0, 100) < prob_mutation:
_df = df_genpool["Number_Conv_Filter_" + str(j)]
_nb_conv_filter = self.__get_random_value(_df.loc[_df != nb_conv_filter])
print(f"Mutation: {_nb_conv_filter} Conv Filter instead of {nb_conv_filter}")
nb_conv_filter = _nb_conv_filter
kernel_size = np.random.choice(best_models["Filter_Size_" + str(j)])
if np.random.randint(0, 100) < prob_mutation:
_df = df_genpool["Filter_Size_" + str(j)]
if len(_df.loc[_df != -1]) > 1:
_kernel_size = self.__get_random_value(_df.loc[_df != kernel_size])
print(f"Mutation: {_kernel_size} Kernel Size instead of {kernel_size}")
kernel_size = _kernel_size
max_pool = np.random.choice(best_models["Max_Pool_" + str(j)])
if np.random.randint(0, 100) < prob_mutation:
_df = df_genpool["Max_Pool_" + str(j)]
_max_pool = self.__get_random_value(_df.loc[_df != max_pool])
print(f"Mutation: {_max_pool} Max Pool instead of {max_pool}")
max_pool = _max_pool
nb_conv_filters.append(int(nb_conv_filter))
kernel_sizes.append(int(kernel_size))
max_pools.append(max_pool)
residual_connections = np.random.choice(best_models["Residual_Connections"])
if np.random.randint(0, 100) < prob_mutation:
_df = df_genpool["Residual_Connections"]
_residual_connections = self.__get_random_value(_df.loc[_df != residual_connections])
print(f"Mutation: {_residual_connections} Residual Connections instead of {residual_connections}")
residual_connections = _residual_connections
batch_norm = np.random.choice(best_models["Batch_Norm"])
if np.random.randint(0, 100) < prob_mutation:
_df = df_genpool["Batch_Norm"]
_batch_norm = self.__get_random_value(_df.loc[_df != batch_norm])
print(f"Mutation: {_batch_norm} Batch Norm instead of {batch_norm}")
batch_norm = _batch_norm
activation = np.random.choice(best_models["Activation"])
if np.random.randint(0, 100) < prob_mutation:
_df = df_genpool["Activation"]
_activation = self.__get_random_value(_df.loc[_df != activation])
print(f"Mutation: {_activation} Activation instead of {activation}")
activation = _activation
model_parameters = self.__get_model_as_data_frame(nb_conv_layer, nb_conv_filters, kernel_sizes,
residual_connections, batch_norm, max_pools, activation)
new_models.append(model_parameters)
return new_models
@staticmethod
def __get_model_as_data_frame(nb_conv_layer, nb_conv_filters, kernel_sizes, residual_connections, batch_norm, max_pools, activation):
"""
Converts all model parameters into one single dataframe.
"""
model_parameters = pd.DataFrame({"Number_Conv_Layer": nb_conv_layer}, index=[0])
for idx, conv_filter in enumerate(nb_conv_filters):
model_parameters = pd.concat(
[model_parameters, pd.DataFrame({"Number_Conv_Filter_" + str(idx+1): conv_filter}, index=[0])], axis=1)
for idx, kernel_size in enumerate(kernel_sizes):
model_parameters = pd.concat(
[model_parameters, pd.DataFrame({"Filter_Size_" + str(idx + 1): kernel_size}, index=[0])], axis=1)
model_parameters = pd.concat([model_parameters, pd.DataFrame({"Residual_Connections": residual_connections}, index=[0])], axis=1)
model_parameters = pd.concat([model_parameters, pd.DataFrame({"Batch_Norm": batch_norm}, index=[0])], axis=1)
for idx, max_pool in enumerate(max_pools):
model_parameters = pd.concat(
[model_parameters, pd.DataFrame({"Max_Pool_" + str(idx + 1): max_pool}, index=[0])], axis=1)
model_parameters = pd.concat([model_parameters, pd.DataFrame({"Activation": activation}, index=[0])], axis=1)
return model_parameters
def save_model(self, model_summary):
"""
Saves all used model parameters as "Genome" to the excel-file.
"""
        df = pd.read_excel(self.path, sheet_name="Genome", engine='openpyxl')
# %% load in libraries
from bs4 import BeautifulSoup
import pandas as pd
import time
from selenium import webdriver
# %% set up selenium and login
from selenium import webdriver
driver = webdriver.Firefox()
url1 = 'https://freida.ama-assn.org/search/list?spec=43236&page=1'
driver.get(url1)
# %% get program names for each specialty
specialties = pd.read_csv('specialties.csv')
df = pd.DataFrame({'Specialty':[],'School': [], 'Link': []})
for index, row in specialties.iterrows():
urlbase = 'https://freida.ama-assn.org/search/list?spec=' + str(row[1]) + '&page='
site = 'https://freida.ama-assn.org'
for i in range(1,100):
url = urlbase + str(i)
# load page
driver.get(url)
# wait for it to load
time.sleep(3)
#soup page
soup = BeautifulSoup(driver.page_source,'html.parser')
# check for blank pages
for div in soup.find_all('div',{'class':['search-list__count']}):
count_text = div.text.strip().split()
else:
links = []
for a in soup.find_all('a',{'class':['search-result-card__title']}):
links += [site + a['href']]
schools = []
for a in soup.find_all('a',{'class':['search-result-card__title']}):
schools += [a.text]
dict_ = {'Specialty':[row[0]]*len(schools),'School': schools,'Link':links}
            df_it = pd.DataFrame(dict_)
import glob
import os
import requests
import time
import sys
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from geocoding_api_extract.utils.progress import Progress
def create_geocoding_api_request_str(street, city, state,
benchmark='Public_AR_Census2010',
vintage='Census2010_Census2010',
layers='14',
format='json') -> str:
"""Create geocoding api request str
Args:
street (str): street address
city (str): city
state (str): state as 2 digit initial
benchmark (str, optional): Defaults to 'Public_AR_Census2010'.
vintage (str, optional): Defaults to 'Census2010_Census2010'.
layers (str, optional): Defaults to '14'.
format (str, optional): Defaults to 'json'.
Returns:
str: geocoding api request string.
"""
return 'https://geocoding.geo.census.gov/geocoder/geographies/address?street=' + \
street + '&city=' + city + '&state=' + state + '&benchmark=' + benchmark + \
'&vintage=' + vintage + '&layers=' + layers + '&format=' + format
def extract_address_batch(address_batch, city, state, retries=5):
"""Extract one address batch
Args:
address_batch (list(str)): list of addresses
city (str): City
state (str): 2-digit state code
retries (int, optional): Number of time to retry the api request. Defaults to 5.
Returns:
DataFrame: result table from api extract
"""
result = {'address': address_batch,
'state': [],
'county': [],
'tract': [],
'cent_lat': [],
'cent_lon': [],
'us_zip': []}
exception = ""
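    # For each address: query the Census geocoder (with retries), and on a match record the
    # census block's STATE/COUNTY/TRACT, its centroid, and the matched zip code; unmatched
    # addresses are filled with 'not found'. If an address exhausts all retries, an empty
    # DataFrame is returned.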
    for address in address_batch:
        request = None
        for attempt in range(retries):
            try:
                # Issue the request inside the retry loop so that transient network errors
                # are retried as well, not only response-parsing errors.
                request = requests.get(
                    create_geocoding_api_request_str(address, city, state))
                if request.status_code == 200 and request.json()['result']['addressMatches'] != []:
result['state'].append(request.json()['result']['addressMatches']
[0]['geographies']['Census Blocks'][0]['STATE'])
result['county'].append(request.json()['result']['addressMatches']
[0]['geographies']['Census Blocks'][0]['COUNTY'])
result['tract'].append(request.json()['result']['addressMatches']
[0]['geographies']['Census Blocks'][0]['TRACT'])
result['cent_lat'].append(request.json()['result']['addressMatches']
[0]['geographies']['Census Blocks'][0]['CENTLAT'])
result['cent_lon'].append(request.json()['result']['addressMatches']
[0]['geographies']['Census Blocks'][0]['CENTLON'])
result['us_zip'].append(request.json()['result']['addressMatches']
[0]['addressComponents']['zip'])
else:
result['state'].append("not found")
result['county'].append("not found")
result['tract'].append("not found")
result['cent_lat'].append("not found")
result['cent_lon'].append("not found")
result['us_zip'].append("not found")
except Exception as x:
print(f'BAD REQUEST: {type(x)} {x} {request}')
exception = x
# wait incrementally longer each retry
wait_time = 30 * (attempt+1)**2
print(f'Waiting {wait_time} seconds.')
time.sleep(wait_time)
else:
break
else:
# all attempts failed, log this
print(
f'API REQUEST FAILED AFTER {retries} ATTEMPTS WITH EXCEPTION: {exception} :: {request}')
empty_result = pd.DataFrame()
return empty_result
    results = pd.DataFrame(result)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 25 18:42:26 2020
@author: simon
"""
"""
This project aims to scrape and analyze customer reviews from Bestbuy.com.
Monitors are used as an example; however, the code works with any other BestBuy product too.
Notice A:
    The code may generate duplicated records if the total number of verified-purchase reviews
    is less than 21 (i.e., less than one full page).
    Therefore, please select popular products to run the code, or perform data cleaning afterward.
Notice B:
    BestBuy DYNAMICALLY changes element IDs on its website; therefore, the code cannot
    automatically scrape data for multiple products in one unattended run.
    However, the code still works well with easy human intervention to solve the problem in Notice B:
    Solution 1:
        Scroll down the page manually and click "REVIEWS" when opening a product page for the FIRST TIME
Solution 2:
Use the keyword "user-generated-content-ratings-and-reviews" to search the web script
and update the dynamic path in line #150
EXAMPLE:
dynamic_path = "//div[@id='user-generated-content-ratings-and-reviews-5a4fb66c-c665-4f28-93b8-0e11db4ee25c']"
"""
# %%%% Preliminaries and library loading
import os
import pandas as pd
import numpy as np
import re
import shelve
import time
from datetime import datetime
import operator
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score, cross_validate, ShuffleSplit, train_test_split, GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import accuracy_score, classification_report,confusion_matrix
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier, KNeighborsRegressor
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import plot_tree
from sklearn.svm import LinearSVC, SVC
from sklearn import linear_model
from sklearn.linear_model import LinearRegression, Lasso, Ridge, LassoCV, BayesianRidge
import statsmodels.formula.api as sm
# libraries to crawl websites
from bs4 import BeautifulSoup
from selenium import webdriver
from pynput.mouse import Button, Controller
#%%
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(round(height, 0)),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
#%%
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 5)
pd.set_option('display.width',800)
# Please update the path before running the code
os.chdir("")
# Please update the path before running the code
path_to_driver = ""
#%%
driver = webdriver.Chrome(executable_path=path_to_driver)
#%%
"This function scrape the reviews on a given webpage"
def search_current_page():
time.sleep(4 + abs(np.random.normal(0,1))*2)
reviews = driver.find_elements_by_xpath("//div[@class='review-item-content col-xs-12 col-md-8 col-lg-9']")
for r in range(len(reviews)):
one_review = {}
        one_review['scrapping_date'] = datetime.now()  # 'datetime' is imported as the class above, so call .now() directly
one_review['url'] = driver.current_url
soup = BeautifulSoup(reviews[r].get_attribute('innerHTML'))
one_review['product_id'] = product_id_current
# Get the raw review
try:
one_review_raw = reviews[r].get_attribute('innerHTML')
except:
one_review_raw = ""
one_review['review_raw'] = one_review_raw
# Get Posted Time
try:
review_posted_time = re.findall('[A-Z][a-z][a-z][a-z]* [0-9][0-9]*, [0-9][0-9][0-9][0-9] [0-9][0-9]*:[0-9][0-9] [A-Z][A-Z]',reviews[r].get_attribute('innerHTML'))[0]
except:
review_posted_time = ""
one_review['review_posted_time'] = review_posted_time
# Get Review Title
try:
review_title = soup.find('div', attrs={'class':'review-heading'}).text[37:]
except:
review_title = ""
one_review['review_title'] = review_title
# Get Review Content
try:
review_text = soup.find('div', attrs={'class':'ugc-review-body body-copy-lg'}).text
except:
review_text = ""
one_review['review_text'] = review_text
# Get number of stars
try:
N_stars = re.findall('[0-5] out of [0-5] [Ss]tar',reviews[r].get_attribute('innerHTML'))[0][0]
except:
N_stars = ""
one_review['N_stars'] = N_stars
reviews_one_monitor.append(one_review)
return reviews_one_monitor
#reviews = driver.find_elements_by_xpath("//div[@class='review-item-content col-xs-12 col-md-8 col-lg-9']")
#reviews[0].get_attribute('innerHTML')
#re.findall('[A-Z][a-z][a-z][a-z]* [0-9][0-9], [0-9][0-9][0-9][0-9] [0-9][0-9]*:[0-9][0-9] [A-Z][A-Z]',reviews[0].get_attribute('innerHTML'))[0]
#%% 1. Data scraping
product_id = []
reviews_one_monitor = []
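# product_id collects the skuId of every product scraped; reviews_one_monitor accumulates
# one dict per review across all products (exported to Review_Data.xlsx once scraping is done).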
#%%
"""
Products included:
[A - Dell]
A1. Dell - S2319NX 23" IPS LED FHD Monitor - Black/Silver
A2. Dell - S2719DGF 27" LED QHD FreeSync Monitor (DisplayPort, HDMI) - Black
A3. Dell - 27" IPS LED FHD FreeSync Monitor - Piano Black
A4. Dell - 32" LED Curved QHD FreeSync Monitor with HDR (DisplayPort, HDMI, USB)
[B - LG]
B1. LG - 24" IPS LED FHD FreeSync Monitor - Black
B2. LG - 27UL600-W 27" IPS LED 4K UHD FreeSync Monitor with HDR (DisplayPort, HDMI) - Silver/White
B3. LG - UltraGear 27" IPS LED QHD FreeSync Monitor with HDR (HDMI) - Black
B4. LG - 34WL500-B 34" IPS LED UltraWide FHD FreeSync Monitor with HDR (HDMI) - Black
[C - HP]
C1. HP - 24f 23.8" IPS LED FHD FreeSync Monitor - Natural Silver
C2. HP - 25x 24.5" LED FHD Monitor (HDMI) - Gray/Green
C3. HP - 27f 27" IPS LED FHD FreeSync Monitor (HDMI, VGA) - Natural Silver
C4. HP - 31.5" IPS LED FHD Monitor (HDMI, VGA) - Black
[D - Samsung]
D1. Samsung - 390 Series 24" LED Curved FHD FreeSync Monitor (DVI, HDMI, VGA) - High glossy black
D2. Samsung - T55 Series 27" LED 1000R Curved FHD FreeSync Monitor (DisplayPort, HDMI, VGA)
D3. Samsung - UR55 Series 28" IPS 4K UHD Monitor - Dark Gray/Blue
D4. Samsung - UJ59 Series U32J590UQN 32" LED 4K UHD FreeSync Monitor (DisplayPort, HDMI) - Dark Gray/Blue
[E - ASUS]
E1. ASUS - 23.8" IPS LCD FHD FreeSync Gaming Monitor (DisplayPort, DVI, HDMI) - Black
E2. ASUS - VG245H 24” FHD 1ms FreeSync Console Gaming Monitor (Dual HDMI, VGA) - Black
E3. ASUS - 27" IPS LCD FHD FreeSync Gaming Monitor (DisplayPort, DVI, HDMI) - Black
E4. ASUS - ZenScreen 15.6” Portable Monitor (USB) - Dark Gray
[F - Acer]
F1. Acer - 23.6" LED FHD Monitor (DVI, HDMI, VGA) - Black
F2. Acer - 23.8" IPS LED FHD FreeSync Monitor (HDMI, VGA) - Black
F3. Acer - 27" IPS LED FHD FreeSync Monitor (HDMI, VGA) - Black
F4. Acer - Predator XB272 27" LED FHD G-SYNC Monitor (DisplayPort, HDMI, USB) - Black
Total = 24
"""
# Creating the list of links.
links_to_scrape = [
# A - Dell
'https://www.bestbuy.com/site/dell-s2319nx-23-ips-led-fhd-monitor-black-silver/6237640.p?skuId=6237640', # Missing
'https://www.bestbuy.com/site/dell-s2719dgf-27-led-qhd-freesync-monitor-displayport-hdmi-black/6293714.p?skuId=6293714',
'https://www.bestbuy.com/site/dell-27-ips-led-fhd-freesync-monitor-piano-black/6394138.p?skuId=6394138',
'https://www.bestbuy.com/site/dell-32-led-curved-qhd-freesync-monitor-with-hdr-displayport-hdmi-usb/6375331.p?skuId=6375331',
# B - LG
'https://www.bestbuy.com/site/lg-24-ips-led-fhd-freesync-monitor-black/6362423.p?skuId=6362423', # Dell
'https://www.bestbuy.com/site/lg-27ul600-w-27-ips-led-4k-uhd-freesync-monitor-with-hdr-displayport-hdmi-silver-white/6329956.p?skuId=6329956',
'https://www.bestbuy.com/site/lg-ultragear-27-ips-led-qhd-freesync-monitor-with-hdr-hdmi-black/6358119.p?skuId=6358119',
'https://www.bestbuy.com/site/lg-34wl500-b-34-ips-led-ultrawide-fhd-freesync-monitor-with-hdr-hdmi-black/6329954.p?skuId=6329954',
# C - HP
'https://www.bestbuy.com/site/hp-24f-23-8-ips-led-fhd-freesync-monitor-natural-silver/6317590.p?skuId=6317590',
'https://www.bestbuy.com/site/hp-25x-24-5-led-fhd-monitor-hdmi-gray-green/6280605.p?skuId=6280605', #LG
'https://www.bestbuy.com/site/hp-27f-27-ips-led-fhd-freesync-monitor-hdmi-vga-natural-silver/6219205.p?skuId=6219205',
'https://www.bestbuy.com/site/hp-31-5-ips-led-fhd-monitor-hdmi-vga-black/6361917.p?skuId=6361917',
# D - Samsung
'https://www.bestbuy.com/site/samsung-390-series-24-led-curved-fhd-freesync-monitor-dvi-hdmi-vga-high-glossy-black/5044601.p?skuId=5044601',
'https://www.bestbuy.com/site/samsung-t55-series-27-led-1000r-curved-fhd-freesync-monitor-displayport-hdmi-vga/6402202.p?skuId=6402202',
'https://www.bestbuy.com/site/samsung-ur55-series-28-ips-4k-uhd-monitor-dark-gray-blue/6386391.p?skuId=6386391',
'https://www.bestbuy.com/site/samsung-uj59-series-u32j590uqn-32-led-4k-uhd-freesync-monitor-displayport-hdmi-dark-gray-blue/6293716.p?skuId=6293716',
# E - ASUS
'https://www.bestbuy.com/site/asus-23-8-ips-lcd-fhd-freesync-gaming-monitor-displayport-dvi-hdmi-black/6395359.p?skuId=6395359',
'https://www.bestbuy.com/site/asus-vg245h-24-fhd-1ms-freesync-console-gaming-monitor-dual-hdmi-vga-black/5591926.p?skuId=5591926',
'https://www.bestbuy.com/site/asus-27-ips-lcd-fhd-freesync-gaming-monitor-displayport-dvi-hdmi-black/6336778.p?skuId=6336778',
'https://www.bestbuy.com/site/asus-zenscreen-15-6-portable-monitor-usb-dark-gray/6403999.p?skuId=6403999',
# F - Acer
'https://www.bestbuy.com/site/acer-23-6-led-fhd-monitor-dvi-hdmi-vga-black/6404005.p?skuId=6404005',
'https://www.bestbuy.com/site/acer-23-8-ips-led-fhd-freesync-monitor-hdmi-vga-black/6401005.p?skuId=6401005',
'https://www.bestbuy.com/site/acer-27-ips-led-fhd-freesync-monitor-hdmi-vga-black/6401007.p?skuId=6401007',
'https://www.bestbuy.com/site/acer-predator-xb272-27-led-fhd-g-sync-monitor-displayport-hdmi-usb-black/6238705.p?skuId=6238705'
]
l = 0
one_link = links_to_scrape[l]
driver.get(one_link)
#%%
# dynamic_path = "//div[@id='user-generated-content-ratings-and-reviews-86dda784-c3d4-484a-9f0a-85c24dfe94b8']"
# %%%
# Expand the reviews
# driver.find_element_by_xpath(dynamic_path).click()
# time.sleep(2)
# See all reviews
driver.find_element_by_xpath("//a[@class='btn btn-secondary v-medium see-all-reviews']").click()
time.sleep(2.3)
# Show only Verified Purchases
driver.find_element_by_xpath("//div[@class='switch']").click()
time.sleep(2.8)
#%%
product_information_current = driver.find_elements_by_xpath("//h2[@class='heading-6 product-title']")
# Get the product skuId
try:
    product_id_current = re.findall('skuId=[0-9]{7}', product_information_current[0].get_attribute('innerHTML'))[0][6:]
except:
    product_id_current = "Unknown"
# Append the current product to total product list
product_id.append(product_id_current)
search_finished = 0
while (search_finished != 2):
search_current_page()
    # Test whether there are any more reviews. If not, stop the while loop.
    # This is the tricky part: only the first and last review pages contain the element
    # (//a[@aria-disabled='true']), so it can be used as a switch. The condition to break the
    # loop must be search_finished == 2, because search_finished jumps from 0 to 1 on the first page.
    # The code still works if the product has only one page of reviews, but it may then generate
    # duplicated records because it copies the first page twice.
try:
driver.find_element_by_xpath("//a[@aria-disabled='true']").get_attribute('innerHTML')
driver.find_element_by_xpath("//a[@data-track='Page next']").click()
search_finished += 1
except:
driver.find_element_by_xpath("//a[@data-track='Page next']").click()
#%%
"Only output the result when everything is compeleted"
# review_df = pd.DataFrame.from_dict(reviews_one_monitor)
# review_df.to_excel('Review_Data.xlsx')
#%% 2. Data Cleaning
review_df = pd.read_excel('Data/Review_Data.xlsx').dropna().reset_index()
# Create brand lists. Elements in the lists are product ID at BestBuy
Dell_list = [6237640, 6293714, 6394138, 6375331]
LG_list = [6362423, 6329956, 6358119, 6329954]
HP_list = [6317590, 6280605, 6219205, 6361917]
Samsung_list = [5044601, 6402202, 6386391, 6293716]
ASUS_list = [6395359, 5591926, 6336778, 6403999]
Acer_list = [6404005, 6401005, 6401007, 6238705]
review_df['brand'] = 0
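# Normalize the posted time (the export mixes two date formats) and map each product_id
# to its brand using the lists above.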
for i in range(review_df.shape[0]):
try:
review_df['review_posted_time'][i] = datetime.strptime(str(review_df['review_posted_time'][i]), '%b %d, %Y %I:%M %p')
except ValueError:
review_df['review_posted_time'][i] = datetime.strptime(str(review_df['review_posted_time'][i]), '%Y-%m-%d %H:%M:%S')
if review_df['product_id'][i] in Dell_list:
review_df['brand'][i] = 'Dell'
elif review_df['product_id'][i] in LG_list:
review_df['brand'][i] = 'LG'
elif review_df['product_id'][i] in HP_list:
review_df['brand'][i] = 'HP'
elif review_df['product_id'][i] in Samsung_list:
review_df['brand'][i] = 'Samsung'
elif review_df['product_id'][i] in ASUS_list:
review_df['brand'][i] = 'ASUS'
elif review_df['product_id'][i] in Acer_list:
review_df['brand'][i] = 'Acer'
# Delete unuseful columns
review_df = review_df.drop(['Unnamed: 0','Unnamed: 0.1','index','scrapping_date'],axis = 1)
num_company = 6
company_list = ["ASUS","Acer","Dell","HP","LG","Samsung"]
#%% 3. Data Visualization
# 3.1 Reviews star distribution
star_by_brand = review_df.groupby(['brand','N_stars']).size()
star_by_brand_reshape = star_by_brand.values.reshape(num_company,5)
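# Reshape the grouped counts into a (6 brands x 5 star levels) array. Rows follow the sorted
# brand order ('ASUS', 'Acer', 'Dell', 'HP', 'LG', 'Samsung'); the reshape assumes every brand
# has at least one review at each star level.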
labels = ['1 Star', '2 Stars', '3 Stars', '4 Stars', '5 Stars']
ASUS_star = star_by_brand_reshape[0]
Acer_star = star_by_brand_reshape[1]
Dell_star = star_by_brand_reshape[2]
HP_star = star_by_brand_reshape[3]
LG_star = star_by_brand_reshape[4]
Samsung_star = star_by_brand_reshape[5]
#%% Without Scaling
x = np.arange(len(labels)) # the label locations
width = 0.15 # the width of the bars
distance_factor = 3
fig, ax = plt.subplots(figsize=(10,5))
rects1_1_1 = ax.bar(x - width * distance_factor*3/3, ASUS_star, width, label='ASUS')
rects1_1_2 = ax.bar(x - width * distance_factor*2/3, Acer_star, width, label='Acer')
rects1_1_3 = ax.bar(x - width * distance_factor*1/3, Dell_star, width, label='Dell')
rects1_1_4 = ax.bar(x + width * distance_factor*0/3, HP_star, width, label='HP')
rects1_1_5 = ax.bar(x + width * distance_factor*1/3, LG_star, width, label='LG')
rects1_1_6 = ax.bar(x + width * distance_factor*2/3, Samsung_star, width, label='Samsung')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Count')
ax.set_title('1-1 Count by Brand and Star')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
ax.grid(alpha = 0.5)
autolabel(rects1_1_1)
autolabel(rects1_1_2)
autolabel(rects1_1_3)
autolabel(rects1_1_4)
autolabel(rects1_1_5)
autolabel(rects1_1_6)
fig.tight_layout()
plt.show()
#%% With Scaling
ASUS_star_percent = star_by_brand_reshape[0] / star_by_brand_reshape[0].sum() * 100
Acer_star_percent = star_by_brand_reshape[1] / star_by_brand_reshape[1].sum() * 100
Dell_star_percent = star_by_brand_reshape[2] / star_by_brand_reshape[2].sum() * 100
HP_star_percent = star_by_brand_reshape[3] / star_by_brand_reshape[3].sum() * 100
LG_star_percent = star_by_brand_reshape[4] / star_by_brand_reshape[4].sum() * 100
Samsung_star_percent = star_by_brand_reshape[5] / star_by_brand_reshape[5].sum() * 100
fig, ax = plt.subplots(figsize=(10,5))
rects1_2_1 = ax.bar(x - width * distance_factor*3/3, ASUS_star_percent, width, label='ASUS')
rects1_2_2 = ax.bar(x - width * distance_factor*2/3, Acer_star_percent, width, label='Acer')
rects1_2_3 = ax.bar(x - width * distance_factor*1/3, Dell_star_percent, width, label='Dell')
rects1_2_4 = ax.bar(x + width * distance_factor*0/3, HP_star_percent, width, label='HP')
rects1_2_5 = ax.bar(x + width * distance_factor*1/3, LG_star_percent, width, label='LG')
rects1_2_6 = ax.bar(x + width * distance_factor*2/3, Samsung_star_percent, width, label='Samsung')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Percentage')
ax.set_title('1-2 Count by Brand and Star (Percentage)')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
ax.grid(alpha = 0.5)
autolabel(rects1_2_1)
autolabel(rects1_2_2)
autolabel(rects1_2_3)
autolabel(rects1_2_4)
autolabel(rects1_2_5)
autolabel(rects1_2_6)
fig.tight_layout()
#%% 3.2 Review By Time
review_date_df = review_df['review_posted_time']
review_date_df = review_date_df.dt.date
date_count_df = review_date_df.value_counts().sort_index()
date_count_df.plot(
kind = 'area',
title = '2 - Review Count Over Time',
figsize = (10,5),
grid = True
)
#%% 4. Data Splitting
review_df['ML_group'] = np.random.randint(100, size = review_df.shape[0])
review_df = review_df.sort_values(by='ML_group')
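# ML_group is a random integer in [0, 100): rows below 80 become training data, 80-89
# validation, and 90-99 test (roughly 80/10/10). Sorting by ML_group keeps the three groups
# contiguous so the concatenated predictions below line up with the masks.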
# Putting structure in the text
#
corpus_review = review_df.review_text.to_list()
corpus_title = review_df.review_title.to_list()
corpus_review = [str (item) for item in corpus_review]
corpus_title = [str (item) for item in corpus_title]
vectorizer = CountVectorizer(lowercase = True,
ngram_range = (1,1),
max_df = 0.85,
min_df = 0.01)
X_review = vectorizer.fit_transform(corpus_review)
X_title = vectorizer.fit_transform(corpus_title)
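# Note: the same CountVectorizer object is re-fit on the titles, so get_feature_names() below
# reflects the title vocabulary; X_review keeps the vocabulary from the earlier review fit.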
print(vectorizer.get_feature_names())
print(X_review.toarray())
print(X_title.toarray())
print('Number of keywords selected for review analysis is {}'.format(len(X_review.toarray()[0])))
print('Number of keywords selected for title analysis is {}'.format(len(X_title.toarray()[0])))
# Split the data
Training_size = 80
Validation_size = 10
Testing_size = Training_size + Validation_size
df_train = review_df.ML_group < Training_size
df_valid = (review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size)
df_test = review_df.ML_group >= Testing_size
y_train = review_df.N_stars[df_train].to_list()
y_valid = review_df.N_stars[df_valid].to_list()
y_test = review_df.N_stars[df_test].to_list()
X_review_train = X_review[np.where(df_train)[0],:]
X_review_valid = X_review[np.where(df_valid)[0],:]
X_review_test = X_review[np.where(df_test)[0],:]
X_title_train = X_title[np.where(df_train)[0],:]
X_title_valid = X_title[np.where(df_valid)[0],:]
X_title_test = X_title[np.where(df_test)[0],:]
#%% 5. Building models for review analysis
#%% 5.1 Linear Regression
model = LinearRegression()
clf_review_linear = model.fit(X_review_train, y_train)
y_review_pred = clf_review_linear.predict(X_review_valid)
review_df['N_star_review_reg'] = np.concatenate(
[
clf_review_linear.predict(X_review_train),
clf_review_linear.predict(X_review_valid),
clf_review_linear.predict(X_review_test)
]
).round().astype(int)
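# Predictions for the train/validation/test rows (in sorted ML_group order) are concatenated,
# rounded to whole stars, and clipped to the valid 1-5 range below.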
review_df.loc[review_df['N_star_review_reg']>5,'N_star_review_reg'] = 5
review_df.loc[review_df['N_star_review_reg']<1,'N_star_review_reg'] = 1
confusion_matrix_review_linear_train = np.zeros((5,5))
confusion_matrix_review_linear_train = pd.DataFrame(confusion_matrix_review_linear_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5'])
confusion_matrix_review_linear_valid = confusion_matrix_review_linear_train.copy()
for i in range(0,5):
for j in range(0,5):
confusion_matrix_review_linear_train.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_review_reg == j+1) & (review_df.ML_group < Training_size)]).shape[0]
confusion_matrix_review_linear_valid.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_review_reg == j+1) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))]).shape[0]
print('Confusion matrix for linear regression on training data (review analysis)')
print(confusion_matrix_review_linear_train)
print('Confusion matrix for linear regression on validation data (review analysis)')
print(confusion_matrix_review_linear_valid)
prediction_score_review_linear_train = review_df[(review_df.N_stars == review_df.N_star_review_reg) & (review_df.ML_group < Training_size)].shape[0]/review_df[(review_df.ML_group < Training_size)].shape[0]
print("The linear model has a prediction score of {:.2f} on training set".format(prediction_score_review_linear_train))
prediction_score_review_linear_valid = review_df[(review_df.N_stars == review_df.N_star_review_reg) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]/review_df[((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]
print("The linear model has a prediction score of {:.2f} on validation set".format(prediction_score_review_linear_valid))
#%% 5.2 Lasso Regression
model = linear_model.Lasso(alpha=0.1)
clf_review_Lasso = model.fit(X_review_train, y_train)
review_df['N_star_review_lasso'] = np.concatenate(
[
clf_review_Lasso.predict(X_review_train),
clf_review_Lasso.predict(X_review_valid),
clf_review_Lasso.predict(X_review_test)
]
).round().astype(int)
review_df.loc[review_df['N_star_review_lasso']>5,'N_star_review_lasso'] = 5
review_df.loc[review_df['N_star_review_lasso']<1,'N_star_review_lasso'] = 1
# Now build the confusion matrix for Lasso Regression
confusion_matrix_review_Lasso_train = np.zeros((5,5))
confusion_matrix_review_Lasso_train = pd.DataFrame(confusion_matrix_review_Lasso_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5'])
confusion_matrix_review_Lasso_valid = confusion_matrix_review_Lasso_train.copy()
for i in range(0,5):
for j in range(0,5):
confusion_matrix_review_Lasso_train.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_review_lasso == j+1) & (review_df.ML_group < Training_size)]).shape[0]
confusion_matrix_review_Lasso_valid.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_review_lasso == j+1) & (review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size)]).shape[0]
print('Confusion matrix for Lasso Regression on training data (review analysis)')
print(confusion_matrix_review_Lasso_train)
print('Confusion matrix for Lasso Regression on validation data (review analysis)')
print(confusion_matrix_review_Lasso_valid)
prediction_score_review_Lasso_train = review_df[(review_df.N_stars == review_df.N_star_review_lasso) & (review_df.ML_group < Training_size)].shape[0]/review_df[(review_df.ML_group < Training_size)].shape[0]
print("The Lasso Regression model has a prediction score of {:.2f} on training set".format(prediction_score_review_Lasso_train))
prediction_score_review_Lasso_valid = review_df[(review_df.N_stars == review_df.N_star_review_lasso) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]/review_df[((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]
print("The Lasso Regression model has a prediction score of {:.2f} on validation set".format(prediction_score_review_Lasso_valid))
#%% 5.3 KNN
k = 1;
results_list_knn = [];
max_k_nn = 50
for k in range(1,max_k_nn + 1):
clf_review_knn = KNeighborsClassifier(n_neighbors=k).fit(X_review_train, y_train)
results_list_knn.append(
np.concatenate(
[
clf_review_knn.predict(X_review_train),
clf_review_knn.predict(X_review_valid),
clf_review_knn.predict(X_review_test )
])
)
print('K = {} is done'.format(k))
review_results_knn = pd.DataFrame(results_list_knn).transpose()
review_results_knn['df_train'] = df_train.to_list()
review_results_knn['df_valid'] = df_valid.to_list()
review_results_knn['df_test'] = df_valid.to_list()
review_results_knn['N_stars'] = review_df['N_stars'].copy()
#%% Build a dictionary that stores the validation accuracy of each K.
knn_review_dict = {}
for i in range(1,max_k_nn):
knn_review_dict[i] = review_results_knn[(review_results_knn.N_stars == review_results_knn[i-1]) & (review_results_knn.df_valid == True)].shape[0]/review_df[(review_results_knn.df_valid == True)].shape[0]
# Rank the testing accuracy and get the best parameter setting for K
best_k_review = max(knn_review_dict.items(), key=operator.itemgetter(1))[0] + 1
print("The best parameter for k is",best_k_review,"and the best validation accuracy score is {:.2f}".format(knn_review_dict.get(best_k_review - 1)))
# Append the optimal knn result to review_df
try:
review_df.drop(['N_star_review_knn'], axis = 1)
except:
review_df.loc[:,'N_star_review_knn'] = review_results_knn.iloc[:, best_k_review -1]
# Now build the confusion matrix for the best parameter
confusion_matrix_review_knn_train = np.zeros((5,5))
confusion_matrix_review_knn_train = pd.DataFrame(confusion_matrix_review_knn_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5'])
confusion_matrix_review_knn_valid = confusion_matrix_review_knn_train.copy()
for i in range(0,5):
for j in range(0,5):
confusion_matrix_review_knn_train.iloc[i][j] = (review_results_knn[(review_results_knn.N_stars == i + 1) & (review_results_knn[best_k_review - 1] == j+1) & (review_df.ML_group < Training_size)]).shape[0]
confusion_matrix_review_knn_valid.iloc[i][j] = (review_results_knn[(review_results_knn.N_stars == i + 1) & (review_results_knn[best_k_review - 1] == j+1) & (review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size)]).shape[0]
print('Confusion matrix for KNN (k = {}) on training data (review analysis)'.format(best_k_review))
print(confusion_matrix_review_knn_train)
print('Confusion matrix for KNN (k = {}) on validation data (review analysis)'.format(best_k_review))
print(confusion_matrix_review_knn_valid)
prediction_score_review_knn_train = review_df[(review_df.N_stars == review_df.N_star_review_knn) & (review_df.ML_group < Training_size)].shape[0]/review_df[(review_df.ML_group < Training_size)].shape[0]
print("The KNN has a prediction score of {:.2f} on training set".format(prediction_score_review_knn_train))
prediction_score_review_knn_valid = review_df[(review_df.N_stars == review_df.N_star_review_knn) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]/review_df[((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]
print("The KNN has a prediction score of {:.2f} on validation set".format(prediction_score_review_knn_valid))
#%% 5.4 Naive Bayes Classification
clf_review_NB = GaussianNB().fit(X_review_train.toarray(), y_train)
review_df['N_star_review_NB'] = np.concatenate(
[
clf_review_NB.predict(X_review_train.toarray()),
clf_review_NB.predict(X_review_valid.toarray()),
clf_review_NB.predict(X_review_test.toarray( ))
]).round().astype(int)
review_df.loc[review_df['N_star_review_NB']>5,'N_star_review_NB'] = 5
review_df.loc[review_df['N_star_review_NB']<1,'N_star_review_NB'] = 1
# Now build the confusion matrix for Naive Bayes Classification
confusion_matrix_review_NB_train = np.zeros((5,5))
confusion_matrix_review_NB_train = pd.DataFrame(confusion_matrix_review_NB_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5'])
confusion_matrix_review_NB_valid = confusion_matrix_review_NB_train.copy()
for i in range(0,5):
for j in range(0,5):
confusion_matrix_review_NB_train.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_review_NB == j+1) & (review_df.ML_group < Training_size)]).shape[0]
confusion_matrix_review_NB_valid.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_review_NB == j+1) & (review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size)]).shape[0]
print('Confusion matrix for Naive Bayes Classification on training data (review analysis)')
print(confusion_matrix_review_NB_train)
print('Confusion matrix for Naive Bayes Classification on validation data (review analysis)')
print(confusion_matrix_review_NB_valid)
prediction_score_review_NB_train = review_df[(review_df.N_stars == review_df.N_star_review_NB) & (review_df.ML_group < Training_size)].shape[0]/review_df[(review_df.ML_group < Training_size)].shape[0]
print("The Naive Bayes Classification model has a prediction score of {:.2f} on training set".format(prediction_score_review_NB_train))
prediction_score_review_NB_valid = review_df[(review_df.N_stars == review_df.N_star_review_NB) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]/review_df[((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]
print("The Naive Bayes Classification model has a prediction score of {:.2f} on validation set".format(prediction_score_review_NB_valid))
#%% 5.5 Decision Trees
criterion_chosen = ['entropy','gini'][1]
random_state = 96
max_depth = 10
results_list_review_tree = []
for depth in range(2,max_depth + 1):
clf_review_tree = tree.DecisionTreeClassifier(
criterion = criterion_chosen,
max_depth = depth,
random_state = 96).fit(X_review_train.toarray(), y_train)
results_list_review_tree.append(
np.concatenate(
[
clf_review_tree.predict(X_review_train.toarray()),
clf_review_tree.predict(X_review_valid.toarray()),
clf_review_tree.predict(X_review_test.toarray())
]).round().astype(int)
)
tree.plot_tree(clf_review_tree)
results_review_tree = pd.DataFrame(results_list_review_tree).transpose()
results_review_tree['df_train'] = df_train.to_list()
results_review_tree['df_valid'] = df_valid.to_list()
results_review_tree['df_test'] = df_test.to_list()
results_review_tree['N_stars'] = review_df['N_stars'].copy()
#%%
# Build a dictionary that stores the testing accuracy of max_depth.
tree_review_dict = {}
for i in range(max_depth-2):
tree_review_dict[i] = results_review_tree[(results_review_tree.N_stars == results_review_tree[i]) & (results_review_tree.df_test == True)].shape[0]/review_df[(results_review_tree.df_test == True)].shape[0]
# Rank the testing accuracy and get the best parameter setting for max_depth
best_max_depth_review = max(tree_review_dict.items(), key=operator.itemgetter(1))[0] + 2
print("The best parameter for max_depth is",best_max_depth_review,"and the best testing accuracy score is {:.2f}".format(tree_review_dict.get(best_max_depth_review)))
#%%
# Append the optimal tree result to review_df
try:
review_df.drop(['N_star_review_tree'], axis = 1)
except:
review_df.loc[:,'N_star_review_tree'] = results_review_tree.iloc[:, best_max_depth_review - 2]
# Now build the confusion matrix for the best parameter
confusion_matrix_review_tree_train = np.zeros((5,5))
confusion_matrix_review_tree_train = pd.DataFrame(confusion_matrix_review_tree_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5'])
confusion_matrix_review_tree_valid = confusion_matrix_review_tree_train.copy()
for i in range(0,5):
for j in range(0,5):
confusion_matrix_review_tree_train.iloc[i][j] = (results_review_tree[(results_review_tree.N_stars == i + 1) & (results_review_tree[best_max_depth_review - 2] == j+1) & (review_df.ML_group < Training_size)]).shape[0]
confusion_matrix_review_tree_valid.iloc[i][j] = (results_review_tree[(results_review_tree.N_stars == i + 1) & (results_review_tree[best_max_depth_review - 2] == j+1) & (review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size)]).shape[0]
print('Confusion matrix for Decision Tree (max depth = {}) on training data (review analysis)'.format(best_max_depth_review))
print(confusion_matrix_review_tree_train)
print('Confusion matrix for Decision Tree (max depth = {}) on validation data (review analysis)'.format(best_max_depth_review))
print(confusion_matrix_review_tree_valid)
prediction_score_review_tree_train = review_df[(review_df.N_stars == review_df.N_star_review_tree) & (review_df.ML_group < Training_size)].shape[0]/review_df[(review_df.ML_group < Training_size)].shape[0]
print("The Decision Tree has a prediction score of {:.2f} on training set".format(prediction_score_review_tree_train))
prediction_score_review_tree_valid = review_df[(review_df.N_stars == review_df.N_star_review_tree) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]/review_df[((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]
print("The Decision Tree has a prediction score of {:.2f} on validation set".format(prediction_score_review_tree_valid))
#%% 5.6 Find optimal model: Not just based on accuracy, but confusion matrix
print("The linear model has a prediction score of {:.2f} on validation set".format(prediction_score_review_linear_valid))
print("The Lasso Regression model has a prediction score of {:.2f} on validation set".format(prediction_score_review_Lasso_valid))
print("The KNN has a prediction score of {:.2f} on validation set".format(prediction_score_review_knn_valid))
print("The Naive Bayes Classification model has a prediction score of {:.2f} on validation set".format(prediction_score_review_NB_valid))
print("The Decision Tree has a prediction score of {:.2f} on validation set".format(prediction_score_review_tree_valid))
print(confusion_matrix_review_linear_valid)
print(confusion_matrix_review_Lasso_valid)
print(confusion_matrix_review_knn_valid)
print(confusion_matrix_review_NB_valid)
print(confusion_matrix_review_tree_valid)
# Optimal Model: Lasso
prediction_score_review_Lasso_test = review_df[(review_df.N_stars == review_df.N_star_review_lasso) & (review_df.ML_group > Testing_size)].shape[0]/review_df[(review_df.ML_group > Testing_size)].shape[0]
print("The Lasso Regression model has a prediction score of {:.2f} on testing set".format(prediction_score_review_Lasso_test))
confusion_matrix_review_Lasso_test = np.zeros((5,5))
confusion_matrix_review_Lasso_test = pd.DataFrame(confusion_matrix_review_tree_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5'])
for i in range(0,5):
for j in range(0,5):
confusion_matrix_review_Lasso_test.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_review_lasso == j+1) & (review_df.ML_group > Testing_size)]).shape[0]
print(confusion_matrix_review_Lasso_test)
#%% 6.Building model for title analysis
#%% 6.1 Linear Regression
model = LinearRegression()
clf_title_linear = model.fit(X_title_train, y_train)
y_title_pred = clf_title_linear.predict(X_title_valid)
review_df['N_star_title_reg'] = np.concatenate(
[
clf_title_linear.predict(X_title_train),
clf_title_linear.predict(X_title_valid),
clf_title_linear.predict(X_title_test)
]
).round().astype(int)
review_df.loc[review_df['N_star_title_reg']>5,'N_star_title_reg'] = 5
review_df.loc[review_df['N_star_title_reg']<1,'N_star_title_reg'] = 1
confusion_matrix_title_linear_train = np.zeros((5,5))
confusion_matrix_title_linear_train = pd.DataFrame(confusion_matrix_title_linear_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5'])
confusion_matrix_title_linear_valid = confusion_matrix_title_linear_train.copy()
for i in range(0,5):
for j in range(0,5):
confusion_matrix_title_linear_train.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_title_reg == j+1) & (review_df.ML_group < Training_size)]).shape[0]
confusion_matrix_title_linear_valid.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_title_reg == j+1) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))]).shape[0]
print('Confusion matrix for linear regression on training data (title analysis)')
print(confusion_matrix_title_linear_train)
print('Confusion matrix for linear regression on validation data (title analysis)')
print(confusion_matrix_title_linear_valid)
prediction_score_title_linear_train = review_df[(review_df.N_stars == review_df.N_star_title_reg) & (review_df.ML_group < Training_size)].shape[0]/review_df[(review_df.ML_group < Training_size)].shape[0]
print("The linear model has a prediction score of {:.2f} on training set".format(prediction_score_title_linear_train))
prediction_score_title_linear_valid = review_df[(review_df.N_stars == review_df.N_star_title_reg) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]/review_df[((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]
print("The linear model has a prediction score of {:.2f} on validation set".format(prediction_score_title_linear_valid))
#%% 6.2 Lasso Regression
model = linear_model.Lasso(alpha=0.1)
clf_title_Lasso = model.fit(X_title_train, y_train)
review_df['N_star_title_lasso'] = np.concatenate(
[
clf_title_Lasso.predict(X_title_train),
clf_title_Lasso.predict(X_title_valid),
clf_title_Lasso.predict(X_title_test)
]
).round().astype(int)
review_df.loc[review_df['N_star_title_lasso']>5,'N_star_title_lasso'] = 5
review_df.loc[review_df['N_star_title_lasso']<1,'N_star_title_lasso'] = 1
# Now build the confusion matrix for Lasso Regression
confusion_matrix_title_Lasso_train = np.zeros((5,5))
confusion_matrix_title_Lasso_train = pd.DataFrame(confusion_matrix_title_Lasso_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5'])
confusion_matrix_title_Lasso_valid = confusion_matrix_title_Lasso_train.copy()
for i in range(0,5):
for j in range(0,5):
confusion_matrix_title_Lasso_train.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_title_lasso == j+1) & (review_df.ML_group < Training_size)]).shape[0]
confusion_matrix_title_Lasso_valid.iloc[i][j] = (review_df[(review_df.N_stars == i + 1) & (review_df.N_star_title_lasso == j+1) & (review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size)]).shape[0]
print('Confusion matrix for Lasso Regression on training data (title analysis)')
print(confusion_matrix_title_Lasso_train)
print('Confusion matrix for Lasso Regression on validation data (title analysis)')
print(confusion_matrix_title_Lasso_valid)
prediction_score_title_Lasso_train = review_df[(review_df.N_stars == review_df.N_star_title_lasso) & (review_df.ML_group < Training_size)].shape[0]/review_df[(review_df.ML_group < Training_size)].shape[0]
print("The Lasso Regression model has a prediction score of {:.2f} on training set".format(prediction_score_title_Lasso_train))
prediction_score_title_Lasso_valid = review_df[(review_df.N_stars == review_df.N_star_title_lasso) & ((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]/review_df[((review_df.ML_group < Testing_size)&(review_df.ML_group >= Training_size))].shape[0]
print("The Lasso Regression model has a prediction score of {:.2f} on validation set".format(prediction_score_title_Lasso_valid))
#%% 6.3 KNN
k = 1;
results_list_knn = [];
max_k_nn = 50
for k in range(1,max_k_nn + 1):
clf_title_knn = KNeighborsClassifier(n_neighbors=k).fit(X_title_train, y_train)
results_list_knn.append(
np.concatenate(
[
clf_title_knn.predict(X_title_train),
clf_title_knn.predict(X_title_valid),
clf_title_knn.predict(X_title_test )
])
)
print('K = {} is done'.format(k))
title_results_knn = pd.DataFrame(results_list_knn).transpose()
title_results_knn['df_train'] = df_train.to_list()
title_results_knn['df_valid'] = df_valid.to_list()
title_results_knn['df_test'] = df_valid.to_list()
title_results_knn['N_stars'] = review_df['N_stars'].copy()
#%% Build a dictionary that stores the validation accuracy of each K.
knn_title_dict = {}
for i in range(1,max_k_nn):
knn_title_dict[i] = title_results_knn[(title_results_knn.N_stars == title_results_knn[i-1]) & (title_results_knn.df_valid == True)].shape[0]/review_df[(title_results_knn.df_valid == True)].shape[0]
# Rank the testing accuracy and get the best parameter setting for K
best_k_title = max(knn_title_dict.items(), key=operator.itemgetter(1))[0] + 1
print("The best parameter for k is",best_k_title,"and the best validation accuracy score is {:.2f}".format(knn_title_dict.get(best_k_title - 1)))
# Append the optimal knn result to review_df
try:
review_df.drop(['N_star_title_knn'], axis = 1)
except:
review_df.loc[:,'N_star_title_knn'] = title_results_knn.iloc[:, best_k_title -1]
# Now build the confusion matrix for the best parameter
confusion_matrix_title_knn_train = np.zeros((5,5))
confusion_matrix_title_knn_train = | pd.DataFrame(confusion_matrix_title_knn_train, columns=['1 (Prediction)','2','3','4','5'],index = ['1 (Actual)','2','3','4','5']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import sklearn.datasets, sklearn.decomposition
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import sklearn_extra
from scipy import stats
from scipy.stats import kurtosis, skew
from collections import defaultdict
import statistics
from itertools import chain
from scipy.interpolate import interp1d
from collections import defaultdict
from nested_dict import nested_dict
from sklearn.cluster import KMeans
from sklearn.cluster import kmeans_plusplus
def kmedoid_clusters(path_test):
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
city = editable_data['city']
save_path = os.path.join(path_test, str('Scenario Generation') , city)
cluster_numbers= int(editable_data['Cluster numbers']) +2
representative_days_path= os.path.join(path_test,'Scenario Generation',city, 'Operation Representative days')
representative_day = {}
representative_scenarios_list = []
for represent in range(cluster_numbers):
representative_day[represent] = pd.read_csv(os.path.join(representative_days_path,'Represent_days_modified_'+str(represent)+'.csv'))
representative_scenario = representative_day[represent]['Electricity total (kWh)'].tolist() + representative_day[represent]['Heating (kWh)'].tolist() #+representative_day[represent]['GTI (Wh/m^2)'].tolist() + \
#representative_day[represent]['Wind Speed (m/s)'].tolist() + representative_day[represent]['Electricity EF (kg/kWh)'].tolist()
representative_scenarios_list.append(representative_scenario)
folder_path = os.path.join(path_test,str(city))
#GTI_distribution = pd.read_csv(os.path.join(folder_path,'best_fit_GTI.csv'))
#wind_speed_distribution = pd.read_csv(os.path.join(folder_path,'best_fit_wind_speed.csv'))
range_data = ['low','medium','high']
scenario_genrated = {}
scenario_probability = defaultdict(list)
scenario_number = {}
num_scenario = 0
i_solar=range_data[1]
i_wind=range_data[1]
i_emission=range_data[1]
#laod the energy deamnd, solar, wind, and electricity emissions from scenario generation file
for i_demand in range_data:
if i_demand=='low':
p_demand = 0.277778
elif i_demand=='medium':
p_demand = 0.444444
elif i_demand=='high':
p_demand = 0.277778
for day in range(365):
#p_solar[i_solar][day] = sum(solar_probability[i_solar][day*24:(day+1)*24])/(sum(solar_probability[range_data[0]][day*24:(day+1)*24])+sum(solar_probability[range_data[1]][day*24:(day+1)*24])+sum(solar_probability[range_data[2]][day*24:(day+1)*24]))
#p_wind[i_wind][day] = sum(wind_probability[i_wind][day*24:(day+1)*24])/(sum(wind_probability[range_data[0]][day*24:(day+1)*24])+sum(wind_probability[range_data[1]][day*24:(day+1)*24])+sum(wind_probability[range_data[2]][day*24:(day+1)*24]))
scenario_probability['D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission].append(p_demand)
scenario_number['D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission]= num_scenario
num_scenario = num_scenario + 1
scenario_genrated['D:'+i_demand+'/S:'+i_solar+'/W:'+i_wind+'/C:'+i_emission] = pd.read_csv(os.path.join(save_path, 'D_'+i_demand+'_S_'+i_solar+'_W_'+i_wind+'_C_'+i_emission+'.csv'), header=None)
features_scenarios = defaultdict(list)
features_scenarios_list = []
features_probability_list = []
features_scenarios_nested = nested_dict()
k=0
days= 365
for scenario in scenario_genrated.keys():
scenario_genrated[scenario]=scenario_genrated[scenario]
for i in range(days):
if i==0:
data = scenario_genrated[scenario][1:25]
else:
data = scenario_genrated[scenario][25+(i-1)*24:25+(i)*24]
#Total electricity, heating, solar, wind, EF.
daily_list =list(chain(data[0].astype('float', copy=False),data[1].astype('float', copy=False)))
features_scenarios[k*days+i] = daily_list
features_scenarios_nested[scenario][i] = features_scenarios[k*days+i]
features_scenarios_list.append(features_scenarios[k*days+i])
features_probability_list.append(scenario_probability[scenario][i])
k = k+1
A = np.asarray(features_scenarios_list)
B = np.asarray(representative_scenarios_list)
C = np.asarray(representative_scenarios_list+features_scenarios_list)
#Convert the dictionary of features to Series
standardization_data = StandardScaler()
A_scaled = standardization_data.fit_transform(A)
C_scaled = standardization_data.fit_transform(C)
#print('Score of features', scores_pca)
#print('Explained variance ratio',pca.explained_variance_ratio_)
# Plot the explained variances
# Save components to a DataFrame
inertia_list = []
search_optimum_cluster = editable_data['Search optimum clusters'] # if I want to search for the optimum number of clusters: 1 is yes, 0 is no
kmeans = KMeans(n_clusters=cluster_numbers, n_init = 1, init = C_scaled[0:cluster_numbers]).fit(C_scaled)
labels = kmeans.labels_
clu_centres = kmeans.cluster_centers_
z={i: np.where(kmeans.labels_ == i)[0] for i in range(kmeans.n_clusters)}
z_length = []
for i in range(kmeans.n_clusters):
z_length.append(len(z[i])/len(labels))
data_represent_days_modified={'Electricity total (kWh)': representative_day[i]['Electricity total (kWh)'],
'Heating (kWh)': representative_day[i]['Heating (kWh)'],
'Percent %': round(len(z[i])/len(labels)*100,4)}
df_represent_days_modified= | pd.DataFrame(data_represent_days_modified) | pandas.DataFrame |
import pickle5 as pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
#mpl.use('pdf')
import itertools
import numpy as np
from datetime import datetime
import torch
from torch import nn
from torch import optim
import os
import sys
import pandas as pd
from matplotlib import interactive
from matplotlib.patches import Rectangle
from utils import make_histos
from utils.utilities import meter
from utils.utilities import cartesian_converter
sys.path.insert(0,'/mnt/c/Users/rober/Dropbox/Bobby/Linux/classes/GAML/GAMLX/nflows/nflows')
from nflows.transforms.autoregressive import MaskedUMNNAutoregressiveTransform
from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal
from nflows.distributions.normal import DiagonalNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.permutations import ReversePermutation
#data_path = "gendata/4features/" #Just electorn features
#data_path = "gendata/16features/" #All 16 features
#data_path = "gendata/Cond/16features/maaf/"
data_path = "gendata/Cond/16features/UMNN/"
physics_cuts = False
gen_all_emd = False
gen_1d_histos = True
gen_emd_comp = False
dfs = []
filenames = os.listdir(data_path)
for f in filenames:
df0 = pd.read_pickle(data_path+f)
dfs.append(df0)
df_nflow_data = pd.concat(dfs)
nflow_data_len = len(df_nflow_data.index)
print("The Generated dataset has {} events".format(nflow_data_len))
with open('data/pi0.pkl', 'rb') as f:
xz = np.array(pickle.load(f), dtype=np.float32)
x = cartesian_converter(xz,type='x')
z = cartesian_converter(xz,type='z')
df_test_data = pd.DataFrame(x)
df_test_data_z = | pd.DataFrame(z) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 13:41:40 2021
@author: bruger
"""
import pandas as pd
import re
from pathlib import Path
from dataclasses import dataclass
import functools
from tqdm import tqdm
import os
from modelclass import model
import modelmf
import modelmanipulation as mp
import modelnormalize as nz
assert 1==1
from modelclass import model
#%% functions
@dataclass
class GrapUNModel():
'''This class takes a UN specification, variable data and variable description
and transform it to ModelFlow business language'''
frml : str = 'model/mod_text.txt' # path to model
data : str = 'data/model_data.xlsx' # path to data
modelname : str = 'Asia' # modelname
start : int = 2017
end : int = 2050
country_trans : any = lambda x:x[:].replace('$','_DOLLAR') # function which transform model specification
country_df_trans : any = lambda x:x # function which transforms initial dataframe
def __post_init__(self):
# breakpoint()
print(f'\nProcessing the model:{self.modelname}',flush=True)
self.rawmodel_org_text = open(self.frml).read().upper()
self.eview_names = set(re.findall('@[A-Z0-1_]*',self.rawmodel_org_text))
raw_list = [l for l in self.rawmodel_org_text.split('\n') ]
select = lambda l: len(l.strip()) and not '@INNOV' in l and '@ADD' not in l
raw_eq = [l for l in raw_list if select(l) ]
noise_list = [l for l in raw_list if not select(l) ]
add_list = [l.split() for l in raw_list if '@ADD' in l]
add_vars = {endo for add,endo,add_factor in add_list}
self.rawmodel_org = '\n'.join(raw_eq)
self.rawmodel = self.country_trans(self.rawmodel_org)
# rawmodel6 = self.trans_eviews(self.rawmodel)
rawmodel6 = '\n'.join(self.trans_eviews(r) for r in self.rawmodel.split('\n'))
line_type = []
line =[]
# breakpoint()
bars = '{desc}: {percentage:3.0f}%|{bar}|{n_fmt}/{total_fmt}'
with tqdm(total=len(rawmodel6.split('\n')),desc='Reading original model',bar_format=bars) as pbar:
for l in rawmodel6.split('\n'):
# the logic says this
#line_type.append('iden'if l.startswith('@IDENTITY ') else 'stoc' )
#but equations like USA_INT says this
line_type.append('stoc' )
line.append(l.replace('@IDENTITY ',''))
# print(f' {sec} {l[:30]} ....')
pbar.update(1)
self.all_frml = [nz.normal(l,add_adjust=(typ=='stoc')) for l,typ in tqdm(zip(line,line_type),desc='Normalizing model',total=len(line),bar_format=bars)]
lfname = ["<Z,EXO> " if typ == 'stoc' else '' for typ in line_type ]
lfname = ["" if typ == 'stoc' else '' for typ in line_type ]
# breakpoint()
self.rorg = [fname + f.normalized for f,fname in zip(self.all_frml,lfname) ]
self.rres = [f.calc_adjustment for f in self.all_frml if len(f.calc_adjustment)]
# breakpoint()
self.fmodel = mp.exounroll(mp.tofrml ('\n'.join(self.rorg)))
self.fres = ('\n'.join(self.rres))
self.mmodel = model(self.fmodel,modelname = self.modelname)
self.mmodel.set_var_description(self.asia_des)
self.mres = model(self.fres,modelname = f'Adjustment factors for {self.modelname}')
# breakpoint()
self.base_input = self.mres.res(self.dfmodel,self.start,self.end)
@functools.cached_property
def dfmodel(self):
'''The original input data enriched with during variablees, variables containing
values for specific historic years and model specific transformation '''
# Now the data
df = (pd.read_excel(self.data).
pipe( lambda df : df.rename(columns={c:c.upper() for c in df.columns}))
.pipe( lambda df : df[[c for c in df.columns if not c+'_0' in df.columns]])
.pipe( lambda df : df.rename(columns = {c : c[:-2] if c.endswith('_0') else c for c in df.columns}))
.pipe( lambda df : df.rename(columns={'UNNAMED: 0':'DATEID'}))
.pipe( lambda df : df.set_index('DATEID'))
.pipe( lambda df : df.rename(columns = {c : c.replace('$','_DOLLAR') for c in df.columns}))
)
try:
sca = pd.read_excel(self.scalars ,index_col=0,header=None).T.pipe(
lambda _df : _df.loc[_df.index.repeat(len(df.index)),:]).\
set_index(df.index)
df= pd.concat([df,sca],axis=1)
except:
print(f'{self.modelname} no Scalars prowided ')
#% Now set the vars with fixedvalues
value_vars = self.mmodel.vlist('*_value_*')
for var,val,year in (v.rsplit('_',2) for v in value_vars) :
df.loc[:,f'{var}_{val}_{year}'] = df.loc[int(year),var]
self.showvaluevars = df[value_vars]
#% now set the values of the dummies
during_vars = self.mmodel.vlist('*during_*')
for var,(dur,per) in ((v,v.split('_',1)) for v in during_vars):
df.loc[:,var]=0
# print(var,dur,per)
pers = per.split('_')
if len(pers) == 1:
df.loc[int(pers[0]),var] = 1
else:
df.loc[int(pers[0]):int(pers[1]),var]=1.
self.showduringvars = df[during_vars]
# breakpoint()
df_out = self.mmodel.insertModelVar(df).pipe(self.country_df_trans)
return df_out.loc[:2050,:].copy()
def __call__(self):
return self.mmodel,self.base_input
def test_model(self,start=None,end=None,maxvar=1_000_000, maxerr=100,tol=0.0001,showall=False):
'''
Compares a straight calculation with the input dataframe.
shows which variables dont have the same value
Args:
df (TYPE): dataframe to run.
start (TYPE, optional): start period. Defaults to None.
end (TYPE, optional): end period. Defaults to None.
maxvar (TYPE, optional): how many variables are to be chekked. Defaults to 1_000_000.
maxerr (TYPE, optional): how many errors to check Defaults to 100.
tol (TYPE, optional): check for absolute value of difference. Defaults to 0.0001.
showall (TYPE, optional): show more . Defaults to False.
Returns:
None.
'''
_start = start if start else self.start
_end = end if end else self.end
# breakpoint()
resresult = self.mmodel(self.base_input,_start,_end,reset_options=True,silent=0,solver='base_res')
self.mmodel.basedf = self.dfmodel
pd.options.display.float_format = '{:.10f}'.format
err=0
print(f'\nChekking residuals for {self.mmodel.name} {_start} to {_end}')
for i,v in enumerate(self.mmodel.solveorder):
if i > maxvar : break
if err > maxerr : break
check = self.mmodel.get_values(v,pct=True).T
check.columns = ['Before check','After calculation','Difference','Pct']
# breakpoint()
if (check.Difference.abs() >= tol).any():
err=err+1
maxdiff = check.Difference.abs().max()
maxpct = check.Pct.abs().max()
# breakpoint()
print('\nVariable with residuals above threshold')
print(f"{v}, Max difference:{maxdiff:15.8f} Max Pct {maxpct:15.10f}% It is number {i} in the solveorder and error number {err}")
if showall:
print(f'\n{self.mmodel.allvar[v]["frml"]}')
print(f'\nResult of equation \n {check}')
print(f'\nEquation values before calculations: \n {self.mmodel.get_eq_values(v,last=False,showvar=1)} \n')
self.mmodel.oldkwargs = {}
@property
def var_description(self):
'''
'''
if isinstance(self.des,dict):
return self.des
try:
trans0 = | pd.read_excel(self.des) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 15:07:19 2020
@author: <NAME>
"""
import os
import csv
import pandas as pd
import numpy as np
import shutil
import readers.log_reader as lr
import utils.support as sup
import readers.log_splitter as ls
from model_training.features_manager import FeaturesMannager as feat
from model_training import embedding_training as em
from model_training import model_optimizer as op
from model_training import model_hpc_optimizer as hpc_op
class ModelTrainer():
"""
This is the man class encharged of the model training
"""
def __init__(self, params):
"""constructor"""
self.log = self.load_log(params)
# Split validation partitions
self.log_train = pd.DataFrame()
self.log_test = pd.DataFrame()
# Activities and roles indexes
self.ac_index = dict()
self.index_ac = dict()
self.rl_index = dict()
self.index_rl = dict()
# Training examples
self.examples = dict()
# Embedded dimensions
self.ac_weights = list()
self.rl_weights = list()
# Preprocess the event-log
self.preprocess(params)
# Train model
params['output'] = os.path.join('output_files', sup.folder_id())
if params['opt_method'] == 'rand_hpc':
optimizer = hpc_op.ModelHPCOptimizer(params,
self.log,
self.ac_index,
self.rl_index)
optimizer.execute_trials()
elif params['opt_method'] == 'bayesian':
optimizer = op.ModelOptimizer(params,
self.log,
self.ac_index,
self.ac_weights,
self.rl_index,
self.rl_weights)
optimizer.execute_trials()
# Export results
output_path = os.path.join('output_files', sup.folder_id())
shutil.copytree(optimizer.best_output, output_path)
shutil.copy(optimizer.file_name, output_path)
self.export_parms(output_path, optimizer.best_parms)
# Remove folder
shutil.rmtree(params['output'])
def preprocess(self, params):
self.log = feat.add_resources(self.log, params['rp_sim'])
# indexes creation
self.indexing()
# split validation
self.split_timeline(0.8, params['one_timestamp'])
# Load embedded matrix
ac_emb_name = 'ac_' + params['file_name'].split('.')[0]+'.emb'
rl_emb_name = 'rl_' + params['file_name'].split('.')[0]+'.emb'
if os.path.exists(os.path.join('input_files',
'embedded_matix',
ac_emb_name)):
self.ac_weights = self.load_embedded(self.index_ac, ac_emb_name)
self.rl_weights = self.load_embedded(self.index_rl, rl_emb_name)
else:
em.training_model(params,
self.log,
self.ac_index, self.index_ac,
self.rl_index, self.index_rl)
self.ac_weights = self.load_embedded(self.index_ac, ac_emb_name)
self.rl_weights = self.load_embedded(self.index_rl, rl_emb_name)
@staticmethod
def load_log(params):
params['read_options']['filter_d_attrib'] = False
log = lr.LogReader(os.path.join('input_files', params['file_name']),
params['read_options'])
log_df = pd.DataFrame(log.data)
if set(['Unnamed: 0', 'role']).issubset(set(log_df.columns)):
log_df.drop(columns=['Unnamed: 0', 'role'], inplace=True)
log_df = log_df[~log_df.task.isin(['Start', 'End'])]
return log_df
def indexing(self):
# Activities index creation
self.ac_index = self.create_index(self.log, 'task')
self.ac_index['start'] = 0
self.ac_index['end'] = len(self.ac_index)
self.index_ac = {v: k for k, v in self.ac_index.items()}
# Roles index creation
self.rl_index = self.create_index(self.log, 'role')
self.rl_index['start'] = 0
self.rl_index['end'] = len(self.rl_index)
self.index_rl = {v: k for k, v in self.rl_index.items()}
# Add index to the event log
ac_idx = lambda x: self.ac_index[x['task']]
self.log['ac_index'] = self.log.apply(ac_idx, axis=1)
rl_idx = lambda x: self.rl_index[x['role']]
self.log['rl_index'] = self.log.apply(rl_idx, axis=1)
@staticmethod
def create_index(log_df, column):
"""Creates an idx for a categorical attribute.
parms:
log_df: dataframe.
column: column name.
Returns:
index of a categorical attribute pairs.
"""
temp_list = log_df[[column]].values.tolist()
subsec_set = {(x[0]) for x in temp_list}
subsec_set = sorted(list(subsec_set))
alias = dict()
for i, _ in enumerate(subsec_set):
alias[subsec_set[i]] = i + 1
return alias
def split_timeline(self, size: float, one_ts: bool) -> None:
"""
Split an event log dataframe by time to peform split-validation.
prefered method time splitting removing incomplete traces.
If the testing set is smaller than the 10% of the log size
the second method is sort by traces start and split taking the whole
traces no matter if they are contained in the timeframe or not
Parameters
----------
size : float, validation percentage.
one_ts : bool, Support only one timestamp.
"""
# Split log data
splitter = ls.LogSplitter(self.log)
train, test = splitter.split_log('timeline_contained', size, one_ts)
total_events = len(self.log)
# Check size and change time splitting method if necesary
if len(test) < int(total_events*0.1):
train, test = splitter.split_log('timeline_trace', size, one_ts)
# Set splits
key = 'end_timestamp' if one_ts else 'start_timestamp'
test = pd.DataFrame(test)
train = | pd.DataFrame(train) | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
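    # Note on the version guard in _check_extension: for pandas >= 1.1 and < 1.2.2 the
    # frames are compared with check_exact=False and each dtype is only required to be
    # an extension dtype, presumably because those pandas versions reproduce nullable
    # extension values slightly differently after the Spark round trip; every other
    # pandas version is compared exactly.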
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
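    # In the astype checks above, the dict maps column labels to target dtypes, so only
    # the listed columns are converted to the nullable extension types; passing a single
    # dtype would instead convert every column (the usual pandas astype semantics, which
    # pandas-on-Spark mirrors here).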
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
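    # Rough sketch of the insert API exercised above (comments only, not executed):
    #
    #   psdf = ps.DataFrame([1, 2, 3])
    #   psdf.insert(1, "b", 10)          # broadcast a scalar into a new column at position 1
    #   psdf.insert(3, "d", psdf.b + 1)  # derive the new column from an existing column
    #
    # Inserting a ps.Series that was built independently of the target frame raises
    # ValueError, presumably because operations across different frames are disabled by
    # default, which is what the assertRaises checks above rely on.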
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
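    # The >254-column frame above exercises the wide-frame path of itertuples: with that
    # many fields pandas may fall back from namedtuples to plain tuples (a namedtuple
    # field-count limit on older Python versions), and the comparison should hold either
    # way; this rationale is an assumption based on pandas' documented behavior.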
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
            # The "distributed" default index is not sequential, so realign the index
            # before comparing the contents with pandas.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
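    # Per the pandas-on-Spark documentation, "sequence" builds a 0, 1, 2, ... index using
    # a single computation node, "distributed-sequence" builds the same sequential index
    # in a distributed fashion, and "distributed" uses monotonically increasing ids that
    # are neither consecutive nor deterministic, which is why that last case is compared
    # only after realigning the index.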
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
        # The partial tuple key ("i",) is left disabled here, presumably because its
        # behavior differs across pandas versions; the fully specified key ("i", "") is
        # checked below instead.
        # self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
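    # Illustrative sketch: the helper below is not collected by unittest (its name does
    # not start with "test_"); it restates the column-selection semantics exercised
    # above using hypothetical labels.
    def _example_multiindex_column_access(self):
        pdf = pd.DataFrame({("x", "a"): [1, 2], ("x", "b"): [3, 4], ("y", "c"): [5, 6]})
        psdf = ps.from_pandas(pdf)
        # Selecting an outer level drops it and keeps the remaining level as columns.
        assert list(psdf["x"].columns) == ["a", "b"]
        # A fully specified tuple key selects a single column as a Series.
        assert psdf[("y", "c")].to_pandas().tolist() == [5, 6]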
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
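    # Both repr tests rely on the same invariant: an inplace mutation (adding a column
    # here) must drop any cached string representation so that __repr__ and _repr_html_
    # are recomputed from the updated frame instead of being served from a stale cache.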
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
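        # Column assignment via __setitem__, attribute access, and assign(),
        # for both plain and multi-level column labels.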
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
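        # Series name propagation through column selection and arithmetic.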
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
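        # Assigning new column labels should be reflected in both the pandas-on-Spark
        # metadata and the underlying Spark column names.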
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
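        # rename() with dict and function mappers, level selection, and inplace=True.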
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
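        # rename_axis() over index and columns, for plain and MultiIndex axes,
        # with scalar, list, dict, and function mappers.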
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
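        # agg() with list and dict specifications, for plain, multi-level, and
        # non-string column labels.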
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
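        # droplevel() on index and columns by name, position, and tuple, plus
        # the errors expected for invalid level arguments.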
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
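        # drop() by labels, columns, and index, covering plain, multi-level,
        # and non-string labels.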
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
        # Assert that at least one of 'labels' or 'columns' must be specified
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
        # non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
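        # Shared dropna() checks, run with axis=0 and axis=1 by the two tests below.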
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
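        # fillna() with scalars, dicts, and ffill/bfill, including limit,
        # inplace=True, and multi-level column labels.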
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
        # Assert approximate counts; the approximate distinct count is
        # deterministic for a given rsd, so exact values can be asserted
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert unsupported axis value yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
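        # sort_values() by one or more columns with ascending/na_position options,
        # inplace=True, and multi-level / non-string column labels.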
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
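        # xs() on a three-level MultiIndex by key tuple and by level, including
        # the expected KeyError/IndexError/TypeError cases.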
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
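        # Every placeholder defined in _MissingPandasLikeDataFrame should raise
        # PandasNotImplementedError when accessed.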
psdf = self.psdf
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
        # pandas appears to mishandle `np.array` values here, so compare
        # against the result computed from the plain list instead
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
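        # merge() on columns, indexes, and a mix of both, across join types,
        # suffixes, a Series on the right, and multi-level / non-string labels.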
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
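            # Row order after a Spark join is not deterministic, so sort both
            # results before comparing them.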
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
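        # append() with matching and non-matching columns, ignore_index,
        # verify_integrity, and MultiIndex rows and columns.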
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
        multi_index_pdf = pd.DataFrame(
            [[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]]
        )
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
        # Assert that appending DataFrames with a different number of index levels raises an error
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
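        # clip() with lower/upper bounds; list-like bounds are rejected and
        # string columns are returned unchanged.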
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure the tests run, but we can't check the result because they are non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with duplicated columns in Series
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with duplicated columns in DataFrame
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check base function
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Skip columns comparison by reset_index
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
        # Results don't have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
# TODO: what if with random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
def test_reindex(self):
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
# Using float as fill_value to avoid int64/32 clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
        columns2 = pd.Index(["numbers", "2", "3"])
#-- -- -- -- Python Data Science Toolbox (Part 2):
# Used for Data Scientist Training Path
#FYI it's a compilation of how to work
#with different commands.
### --------------------------------------------------------
# # ------>>>>>Iterators vs Iterables
# Let's do a quick recall of what you've learned
# about iterables and iterators. Recall from the video
# that an iterable is an object that can return an iterator,
# while an iterator is an object that keeps state and produces
# the next value when you call next() on it. In this exercise,
# you will identify which object is an iterable and which is an iterator.
# The environment has been pre-loaded with the variables flash1 and flash2.
# Try printing out their values with print() and next() to figure out which
# is an iterable and which is an iterator
# R/ flash1 is an iterable and flash2 is an iterator.
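# (Added illustration, not from the original exercise.) A minimal sketch of the
# distinction above: a list is an iterable, iter() turns it into an iterator,
# and only the iterator supports next().
example_iterable = ['a', 'b', 'c']
example_iterator = iter(example_iterable)
print(next(example_iterator))  # 'a'
print(next(example_iterator))  # 'b'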
### --------------------------------------------------------
# # ------>>>>> Iterating over iterables - ex#0
# Create a list of strings: flash
flash = ['<NAME>', '<NAME>', 'wally west', 'bart allen']
# Print each list item in flash using a for loop
for name in flash:
print(name)
# Create an iterator for flash: superhero
superhero = iter(flash)
# Print each item from the iterator
print(next(superhero))
print(next(superhero))
print(next(superhero))
print(next(superhero))
### --------------------------------------------------------
# # ------>>>>> Iterating over iterables - ex#1
# Create an iterator for range(3): small_value
small_value = iter(range(3))
# Print the values in small_value
print(next(small_value))
print(next(small_value))
print(next(small_value))
# Loop over range(3) and print the values
for num in range(3):
print(num)
# Create an iterator for range(10 ** 100): googol
googol = iter(range(10 ** 100))
# Print the first 5 values from googol
print(next(googol))
print(next(googol))
print(next(googol))
print(next(googol))
print(next(googol))
### --------------------------------------------------------
# # ------>>>>> Iterators as function arguments
# Create a range object: values
values = range(10, 21)
# Print the range object
print(values)
# Create a list of integers: values_list
values_list = list(values)
# Print values_list
print(values_list)
# Get the sum of values: values_sum
values_sum = sum(values)
# Print values_sum
print(values_sum)
### --------------------------------------------------------
# Create a list of strings: mutants
mutants = ['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>']
# Create a list of tuples: mutant_list
mutant_list = list(enumerate(mutants))
# Print the list of tuples
print(mutant_list)
# Unpack and print the tuple pairs
for index1, value1 in enumerate(mutants):
print(index1, value1)
# Change the start index
for index2, value2 in enumerate(mutants, start=1):
print(index2, value2)
### --------------------------------------------------------
# # ------>>>>> Using zip
# Create a list of tuples: mutant_data
mutant_data = list(zip(mutants, aliases, powers))
# Print the list of tuples
print(mutant_data)
# Create a zip object using the three lists: mutant_zip
mutant_zip = zip(mutants, aliases, powers)
# Print the zip object
print(mutant_zip)
# Unpack the zip object and print the tuple values
for value1, value2, value3 in mutant_zip:
print(value1, value2, value3)
### --------------------------------------------------------
# # ------>>>>> Using * and zip to 'unzip'
# Create a zip object from mutants and powers: z1
z1 = zip(mutants, powers)
# Print the tuples in z1 by unpacking with *
print(*z1)
# Re-create a zip object from mutants and powers: z1
z1 = zip(mutants, powers)
# 'Unzip' the tuples in z1 by unpacking with * and zip(): result1, result2
result1, result2 = zip(*z1)
# Check if unpacked tuples are equivalent to original tuples
print(result1 == mutants)
print(result2 == powers)
### --------------------------------------------------------
# # ------>>>>> Processing large amounts of Twitter data
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Iterate over the file chunk by chunk
for chunk in pd.read_csv('./tweets.csv', chunksize=10):
# Iterate over the column in DataFrame
for entry in chunk['lang']:
if entry in counts_dict.keys():
counts_dict[entry] += 1
else:
counts_dict[entry] = 1
# Print the populated dictionary
print(counts_dict)
### --------------------------------------------------------
# # ------>>>>> Extracting information for large amounts of Twitter data
# Define count_entries()
def count_entries(csv_file, c_size, colname):
"""Return a dictionary with counts of
occurrences as value for each key."""
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Iterate over the file chunk by chunk
for chunk in pd.read_csv(csv_file, chunksize=c_size):
# Iterate over the column in DataFrame
for entry in chunk[colname]:
if entry in counts_dict.keys():
counts_dict[entry] += 1
else:
counts_dict[entry] = 1
# Return counts_dict
return counts_dict
# Call count_entries(): result_counts
result_counts = count_entries('./tweets.csv', 10, 'lang')
# Print result_counts
print(result_counts)
### --------------------------------------------------------
# # ------>>>>> Write a basic list comprehension
# In this exercise, you will practice what you've learned from
# the video about writing list comprehensions. You will write a
# list comprehension and identify the output that will be produced.
# The following list has been pre-loaded in the environment.
# doctor = ['house', 'cuddy', 'chase', 'thirteen', 'wilson']
# How would a list comprehension that produces a list of the first
# character of each string in doctor look like? Note that the list
# comprehension uses doc as the iterator variable. What will the output be?
# R/ The list comprehension is [doc[0] for doc in doctor] and produces
# the list ['h', 'c', 'c', 't', 'w'].
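# (Added illustration.) The comprehension from the answer above, written out so
# it can be run directly with the pre-loaded list:
doctor = ['house', 'cuddy', 'chase', 'thirteen', 'wilson']
first_letters = [doc[0] for doc in doctor]
print(first_letters)  # ['h', 'c', 'c', 't', 'w']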
### --------------------------------------------------------
# # ------>>>>> List comprehension over iterables
# You know that list comprehensions can be built over iterables.
# Given the following objects below, which of these can we build list comprehensions over?
# doctor = ['house', 'cuddy', 'chase', 'thirteen', 'wilson']
# range(50)
# underwood = 'After all, we are nothing more or less than what we choose to reveal.'
# jean = '24601'
# flash = ['<NAME>', '<NAME>', 'w<NAME>', '<NAME>']
# valjean = 24601
# R/ You can build list comprehensions over all the objects except the integer object valjean.
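# (Added illustration.) Strings and ranges are iterable, so comprehensions work
# on them directly; a bare integer such as valjean = 24601 is not iterable and
# would raise a TypeError if used as the iterable in a comprehension.
jean = '24601'
print([int(ch) for ch in jean])    # [2, 4, 6, 0, 1]
print([n ** 2 for n in range(5)])  # [0, 1, 4, 9, 16]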
### --------------------------------------------------------
# # ------>>>>> Writing list comprehensions
# Create list comprehension: squares
squares = [i ** 2 for i in range(10)]
print(squares)
### --------------------------------------------------------
# # ------>>>>> Nested list comprehensions
# Create a 5 x 5 matrix using a list of lists: matrix
matrix = [[col for col in range(5)] for row in range(5)]
# Print the matrix
for row in matrix:
print(row)
### --------------------------------------------------------
# # ------>>>>> Using conditionals in comprehensions - ex#0
# Create a list of strings: fellowship
fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
# Create list comprehension: new_fellowship
new_fellowship = [member for member in fellowship if len(member) >= 7]
# Print the new list
print(new_fellowship)
### --------------------------------------------------------
# # ------>>>>> Using conditionals in comprehensions - ex#1
# Create a list of strings: fellowship
fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
# Create list comprehension: new_fellowship
new_fellowship = [member if len(member) >= 7 else member.replace(
member, '') for member in fellowship]
# Print the new list
print(new_fellowship)
### --------------------------------------------------------
# # ------>>>>> Dict comprehensions
# Create a list of strings: fellowship
fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
# Create dict comprehension: new_fellowship
new_fellowship = {member: len(member) for member in fellowship}
# Print the new list
print(new_fellowship)
### --------------------------------------------------------
# # ------>>>>> List comprehensions vs generators
# You've seen from the videos that list comprehensions and generator
# expressions look very similar in their syntax, except for the use of
# parentheses () in generator expressions and brackets [] in list comprehensions.
# In this exercise, you will recall the difference between
# list comprehensions and generators. To help with that task,
# the following code has been pre-loaded in the environment:
# # List of strings
# fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
# # List comprehension
# fellow1 = [member for member in fellowship if len(member) >= 7]
# # Generator expression
# fellow2 = (member for member in fellowship if len(member) >= 7)
# Try to play around with fellow1 and fellow2 by figuring out their types and
# printing out their values. Based on your observations and what you can recall
# from the video, select from the options below the best description for the
# difference between list comprehensions and generators.
# R/ A list comprehension produces a list as output, a generator produces a generator object.
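# (Added illustration.) The pre-loaded expressions above, showing the type
# difference and the lazy evaluation of the generator:
fellowship = ['frodo', 'samwise', 'merry', 'aragorn', 'legolas', 'boromir', 'gimli']
fellow1 = [member for member in fellowship if len(member) >= 7]
fellow2 = (member for member in fellowship if len(member) >= 7)
print(type(fellow1))  # <class 'list'>
print(type(fellow2))  # <class 'generator'>
print(next(fellow2))  # values are produced one at a time: 'samwise'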
### --------------------------------------------------------
# # ------>>>>> Write your own generator expressions
# Create generator object: result
result = (num for num in range(31))
# Print the first 5 values
print(next(result))
print(next(result))
print(next(result))
print(next(result))
print(next(result))
# Print the rest of the values
for value in result:
print(value)
### --------------------------------------------------------
# # ------>>>>> Changing the output in generator expressions
# Create a list of strings: lannister
lannister = ['cersei', 'jaime', 'tywin', 'tyrion', 'joffrey']
# Create a generator object: lengths
lengths = (len(person) for person in lannister)
# Iterate over and print the values in lengths
for value in lengths:
print(value)
### --------------------------------------------------------
# # ------>>>>> Build a generator
# Create a list of strings
lannister = ['cersei', 'jaime', 'tywin', 'tyrion', 'joffrey']
# Define generator function get_lengths
def get_lengths(input_list):
"""Generator function that yields the
length of the strings in input_list."""
# Yield the length of a string
for person in input_list:
yield len(person)
# Print the values generated by get_lengths()
for value in get_lengths(lannister):
print(value)
### --------------------------------------------------------
# # ------>>>>> List comprehensions for time-stamped data
# Extract the created_at column from df: tweet_time
tweet_time = df['created_at']
# Extract the clock time: tweet_clock_time
tweet_clock_time = [entry[11:19] for entry in tweet_time]
# Print the extracted times
print(tweet_clock_time)
### --------------------------------------------------------
# # ------>>>>> Conditional list comprehensions for time-stamped data
# Extract the created_at column from df: tweet_time
tweet_time = df['created_at']
# Extract the clock time: tweet_clock_time
tweet_clock_time = [entry[11:19] for entry in tweet_time if entry[17:19] == '19']
# Print the extracted times
print(tweet_clock_time)
### --------------------------------------------------------
# # ------>>>>> Dictionaries for data science
# Zip lists: zipped_lists
zipped_lists = zip(feature_names, row_vals)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Print the dictionary
print(rs_dict)
### --------------------------------------------------------
# # ------>>>>> Writing a function to help you
# Define lists2dict()
def lists2dict(list1, list2):
"""Return a dictionary where list1 provides
the keys and list2 provides the values."""
# Zip lists: zipped_lists
zipped_lists = zip(list1, list2)
# Create a dictionary: rs_dict
rs_dict = dict(zipped_lists)
# Return the dictionary
return rs_dict
# Call lists2dict: rs_fxn
rs_fxn = lists2dict(feature_names, row_vals)
# Print rs_fxn
print(rs_fxn)
### --------------------------------------------------------
# # ------>>>>> Using a list comprehension
# Print the first two lists in row_lists
print(row_lists[0])
print(row_lists[1])
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Print the first two dictionaries in list_of_dicts
print(list_of_dicts[0])
print(list_of_dicts[1])
### --------------------------------------------------------
# # ------>>>>> Turning this all into a DataFrame
# Import the pandas package
import pandas as pd
# Turn list of lists into list of dicts: list_of_dicts
list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists]
# Turn list of dicts into a DataFrame: df
df = pd.DataFrame(list_of_dicts)
# Print the head of the DataFrame
print(df.head())
### --------------------------------------------------------
# # ------>>>>> Processing data in chunks - ex#0
with open('world_dev_ind.csv') as file:
# Skip the column names
file.readline()
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Process only the first 1000 rows
for j in range(0, 1000):
# Split the current line into a list: line
line = file.readline().split(',')
# Get the value for the first column: first_col
first_col = line[0]
# If the column value is in the dict, increment its value
if first_col in counts_dict.keys():
counts_dict[first_col] += 1
# Else, add to the dict and set value to 1
else:
counts_dict[first_col] = 1
# Print the resulting dictionary
print(counts_dict)
### --------------------------------------------------------
# # ------>>>>> Writing a generator to load data in chunks (2)
# Define read_large_file()
def read_large_file(file_object):
"""A generator function to read a large file lazily."""
# Loop indefinitely until the end of the file
while True:
# Read a line from the file: data
data = file_object.readline()
# Break if this is the end of the file
if not data:
break
# Yield the line of data
yield data
# Open a connection to the file
with open('world_dev_ind.csv') as file:
# Create a generator object for the file: gen_file
gen_file = read_large_file(file)
# Print the first three lines of the file
print(next(gen_file))
print(next(gen_file))
print(next(gen_file))
### --------------------------------------------------------
# # ------>>>>> Writing a generator to load data in chunks (3)
# Initialize an empty dictionary: counts_dict
counts_dict = {}
# Open a connection to the file
with open('world_dev_ind.csv') as file:
# Iterate over the generator from read_large_file()
for line in read_large_file(file):
row = line.split(',')
first_col = row[0]
if first_col in counts_dict.keys():
counts_dict[first_col] += 1
else:
counts_dict[first_col] = 1
# Print
print(counts_dict)
### --------------------------------------------------------
# # ------>>>>> Writing an iterator to load data in chunks - ex#0
# Import the pandas package
import pandas as pd
# Initialize reader object: df_reader
df_reader = pd.read_csv('ind_pop.csv', chunksize=10)
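# (Added, assumed continuation.) The reader created above is an iterator over
# DataFrame chunks, so the natural next step is to pull chunks with next();
# 'ind_pop.csv' must exist locally for this to run.
print(next(df_reader))
print(next(df_reader))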
import os
import re
import csv
import math
from collections import defaultdict
from scipy.signal import butter, lfilter
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from statistics import mean
from scipy.stats import kurtosis, skew
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from numpy import mean
from numpy import std
#from numpy import dot, sum, tile, linalg, det
#from numpy.linalg import inv, det
class DataManager:
def __init__(self, filename, person, fall):
self.filename = filename
self.person = person
self.fall = fall
self.params = []
self.parsed_data = []
self.acc_1_factor = (2*16)/(2**13)
self.sis_params = {
0: 'acc_x_1',
1: 'acc_y_1',
2: 'acc_z_1',
}
self.new_path = '/Users/kaushikkota/ms_cs_uw_madison/cs_799/CS799-Project/SisFall_Phases'
self.max_val = 0
def get_sis_fall_params(self):
try:
with open('/Users/kaushikkota/ms_cs_uw_madison/cs_799/CS799-Project/SisFall_dataset/' + self.person + '/' + self.filename) as f:
contents = f.readlines()
for line in contents:
sensor_data = {}
data_sample = line.strip().replace(';', '').split(',')
for i in range(3):
formatted_sample = data_sample[i].lstrip()
sensor_data[self.sis_params[i]] = int(formatted_sample)
if i in [0, 1, 2]:
sensor_data[self.sis_params[i]] = self.acc_1_factor*sensor_data[self.sis_params[i]]
self.parsed_data.append(sensor_data)
except FileNotFoundError:
return {}, False
acc_x_data, acc_y_data, acc_z_data, svm_data = [], [], [], []
data_pre_peak, data_post_peak = pd.DataFrame(), pd.DataFrame()
        data_pre_impact, data_post_impact = pd.DataFrame(), pd.DataFrame()
import numpy as np
import pandas as pd
class MovingAverage:
def __init__(self, df, outcome, target_days=np.array([5])):
self.df = df
self.outcome = outcome
self.target_days = target_days[0] # Just take the first one for now
def predict(self, window=5):
moving_average = []
outcomes = self.df[self.outcome].values
outcomes = np.stack(outcomes) # Flatten to get matrix
for i in range(0, len(self.df)):
ts = outcomes[i,:]
            # Guard: the series should be at least as long as the window; if it
            # is shorter, fall through and average whatever values are available.
            if len(ts) < window:
                pass
                # print("Array length too small for given window size. Please pass larger array or lower window.")
ts_cp = np.array(ts, dtype=float)
for j in range(0, self.target_days):
pred_ma = np.mean(ts_cp[-window:])
ts_cp = np.append(ts_cp, pred_ma)
moving_average.append([ts_cp[-1]])
self.predictions = moving_average
## TESTS
# Test 3 day out prediction
def moving_average_test():
true = [3.6799999999999997, 12.68]
## Create Fake Data
dicti = {
"hosp": ["1", "2"],
"hospitalizations": [np.array([1,2,3,4,5]), np.array([10, 11, 12, 13, 14])]
}
    df = pd.DataFrame(dicti)
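    # (Added, assumed completion of the test.) Run the 3-day-ahead forecast with
    # the default 5-day window and compare against the hand-computed values in
    # `true`; the constructor arguments are inferred from the class above.
    method = MovingAverage(df, "hospitalizations", target_days=np.array([3]))
    method.predict(window=5)
    preds = [p[0] for p in method.predictions]
    assert np.allclose(preds, true), f"expected {true}, got {preds}"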
"""
This script is used to split the storm events downloaded from the website.
"""
# import packages
import numpy as np
import pandas as pd
import os
import datetime
# define the repository path
from common_settings import obspath
import matplotlib.pyplot as plt
import seaborn as sns
# separate the long time series into one-year data sets
file_name = '126001A_daily.csv'
df = pd.read_csv(obspath + file_name, index_col='Datetime')
# convert the index of df into datetime
time_format = "%H:%M:%S %d/%m/%Y"
df.index = pd.to_datetime(df.index, format=time_format)
# df.set_index(['Time'], inplace=True)
for year in range(2006, 2020):
print(year)
time_str = [f'00:00:00 01/07/{year}', f'00:00:00 30/06/{year+1}']
    time_period = [pd.datetime.strptime(time_str[0], time_format),
                   pd.datetime.strptime(time_str[1], time_format)]
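    # (Added, assumed completion of the loop body.) Slice the water year out of
    # the daily series and write it to its own file; the output file name below
    # is an assumption, not taken from the original script.
    df_year = df.loc[time_period[0]:time_period[1]]
    df_year.to_csv(obspath + f'126001A_daily_{year}_{year + 1}.csv')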
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
import glob
from datetime import datetime
import sklearn #A Machine Learning library
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# # Acquire Data
# In[ ]:
df = pd.read_csv("../../../input/joniarroba_noshowappointments/KaggleV2-May-2016.csv")
# # Prepare Data
# ## Explore
# In[ ]:
df.info()
# ### Describe data
# In[ ]:
df.describe()
# In[ ]:
df.head(3)
# In[ ]:
print('Alcoholism', df.Alcoholism.unique())
print('Handcap', df.Handcap.unique())
print('Diabetes', df.Diabetes.unique())
print('Hipertension', df.Hipertension.unique())
# ### Data Validation
# In[ ]:
#Checking if there are are any missing values
df.isnull().sum()
# ## Visualize Data(Data Distribution)
# In[ ]:
#Extract date
df['AppointmentDay'] = pd.to_datetime(df['AppointmentDay'])
import csv
from pathlib import Path
import numpy as np
import matplotlib
from matplotlib import colors as col
from matplotlib import cm
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import plottools
_newmap = col.LinearSegmentedColormap.from_list("Magentas", [
(1.0, 1.0, 1.0, 1.0),
    (226/255, 0, 116/255, 1.0)])
cm.register_cmap(name="Magentas", cmap=_newmap)
def prefilter(data):
"""Remove artifact candidates."""
new_data = data[data["ID"] != "CASP5_A8_m1"]
new_data = new_data[new_data["ID"] != "CASP5_A8_m2"]
return new_data
def read_data(csv_path, delimiter = ";", filter=prefilter):
"""Read data."""
with csv_path.open() as csv_file:
data = pd.read_csv(csv_file, delimiter=delimiter)
return data
def parse_renamings(renamings, delimiter=";"):
"""Reads all given internal to HUGO renamings from a csv file."""
data = pd.read_csv(renamings, delimiter=delimiter)
result = {}
for row in data.itertuples():
result[row[1]] = row[2]
return result
def rename_candidate_hugo(candidate, renamings):
"""Renames a candidate name according to a renaming map."""
candidate_split = candidate.split("_")
name_expr = candidate_split[0].split(".")
base_name = name_expr[0]
if base_name in renamings:
base_name = renamings[base_name]
name_expr[0] = base_name
name_start = ".".join(name_expr)
candidate_split[0] = name_start
result = "_".join(candidate_split)
return result
def rename_candidate(candidate, renamings=None):
"""Renames a candidate name according to a set of renamings."""
result = candidate
    if renamings is not None:
result = rename_candidate_hugo(candidate, renamings)
if result == "RNF43_C6_m1":
result = "RNF43.A2_m1"
elif result == "RNF43_C6_m2":
result = "RNF43.A2_m2"
elif result == "RNF43_G7_m1":
result = "RNF43.A3_m1"
elif result == "RNF43_G7_m2":
result = "RNF43.A3_m2"
return result
def filter_same(data, min_distance=0, by_hla=None, length_total=200, renamings=None):
"""Filters a data frame removing duplicated epitopes."""
candidate_rows = {}
    if by_hla is not None:
data = data[data["ID"] == by_hla]
data = data.sort_values(["HLA", "Pos"])
candidates = data["HLA"].unique()
else:
data = data.sort_values(["ID", "Pos"])
candidates = data["ID"].unique()
for cand in candidates:
candidate_rows[rename_candidate(cand)] = np.zeros(length_total)
candidate_dicts = []
current_candidate = None
current_position = 0
current_length = 0
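    # Walk the sorted hits and merge neighbouring peptides closer than min_distance into one covered stretch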
for row in data.itertuples():
        if by_hla is not None:
candidate = getattr(row, "HLA")
else:
candidate = rename_candidate(getattr(row, "ID"))
position = getattr(row, "Pos")
length = len(getattr(row, "Peptide"))
distance = position - current_position
        if current_candidate is None:
current_candidate = candidate
current_position = position
current_length = length
continue
elif current_candidate != candidate:
candidate_dicts.append({
"ID": current_candidate, "Pos": current_position,
"length": current_length, "HLA": current_candidate
})
current_candidate = candidate
current_position = position
current_length = length
continue
if distance < min_distance:
current_length = position - current_position + length
else:
candidate_dicts.append({
"ID": current_candidate, "Pos": current_position,
"length": current_length, "HLA": current_candidate
})
current_candidate = candidate
current_position = position
current_length = length
for candidate_dict in candidate_dicts:
candidate = candidate_dict["ID"]
position = candidate_dict["Pos"]
length = candidate_dict["length"]
for idx in range(position, position + length):
candidate_rows[candidate][idx] += 1.0
return candidate_dicts, candidate_rows
def maximum_overlap(posarray):
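    """Return the largest per-position overlap count across all rows of a position array."""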
maximum = 0.0
for key in posarray.keys():
for elem in posarray[key]:
if elem > maximum:
maximum = elem
return int(maximum)
def read_peptide_lengths(path, renamings=None):
"""Reads a set of peptide lengths from file."""
result = {}
with open(path) as csv_file:
rd = csv.reader(csv_file, delimiter=",")
for row in rd:
if not(rename_candidate(row[1], renamings=renamings) in result):
result[rename_candidate(row[1], renamings=renamings)] = 0
result[rename_candidate(row[1], renamings=renamings)] += len(row[0])
return result
if __name__ == "__main__":
def plot_many(posarray, hlas, candidates, cmap_code="Magentas", output_path="outputimages/out_epitopes.png", width=20, height=140,
x_label="foo", y_label="bar", plot_title="baz", cbar_label="quux"):
length = len(posarray)
fig, ax = plt.subplots(nrows=length, figsize=(width, height), sharex=True, sharey=True)
fig.subplots_adjust(hspace=0, wspace=0)
xticks = [idx * 10 for idx in range(20)] + [200]
yticks = [idx for idx in range(len(hlas) + 1)]
ax[0].set_xticks(xticks)
ax[0].set_yticks(yticks)
ax[0].set_yticklabels(["all"] + hlas)
xl = ax[-1].set_xlabel(x_label)
fig.text(0.06, 0.5, y_label, ha='center', va='center', rotation='vertical')
xl.set_fontsize(40)
ax[0].xaxis.labelpad = 20
ax[0].yaxis.labelpad = 20
for idx in range(length):
pt = ax[idx].set_title(candidates[idx])
pt.set_fontsize(24)
pt.set_fontweight("bold")
ax[idx].set_aspect("auto")
ax[idx].axhline(0.5, color="black")
cmap = matplotlib.cm.get_cmap(cmap_code)
cmap.set_bad(color="black")
allhlas = np.zeros(200)
for row in hlas:
allhlas += posarray[idx][row] if row in posarray[idx].keys() else np.zeros(200)
array = np.asarray([allhlas] + [posarray[idx][row] if row in posarray[idx].keys() else np.zeros(200) for row in hlas])
for label in ax[idx].get_yticklabels():
label.set_fontsize(24)
label.set_fontweight("bold")
for label in ax[idx].get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(24)
label.set_fontweight("bold")
ax[idx].patch.set(hatch="xxxxxxx", edgecolor="black")
im = ax[idx].imshow(array, cmap=cmap, vmax=4, vmin=0, aspect="auto")
plt.savefig(output_path, dpi=300)
plt.close()
## Plot all those things as separate figures also:
for idx in range(length):
fig, ax = plt.subplots(figsize=(20, 20))
pt = ax.set_title(candidates[idx])
pt.set_fontsize(24)
pt.set_fontweight("bold")
ax.set_aspect("auto")
ax.axhline(0.5, color="black")
cmap = matplotlib.cm.get_cmap(cmap_code)
cmap.set_bad(color="black")
allhlas = np.zeros(200)
for row in hlas:
allhlas += posarray[idx][row] if row in posarray[idx].keys() else np.zeros(200)
array = np.asarray([allhlas] + [posarray[idx][row] if row in posarray[idx].keys() else np.zeros(200) for row in hlas])
for label in ax.get_yticklabels():
label.set_fontsize(24)
label.set_fontweight("bold")
for label in ax.get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(24)
label.set_fontweight("bold")
ax.patch.set(hatch="xxxxxxx", edgecolor="black")
im = ax.imshow(array, cmap=cmap, vmax=4, vmin=0, aspect="auto")
plt.savefig(output_path + ".subplot." + candidates[idx] + ".png", dpi=300)
plt.close()
def plot_many_complete(posarray_strong, posarray_weak, posarray_garbage, hlas, candidates, lengths, cmap_code="Magentas", output_path="outputimages/out_epitopes.png", width=20, height=140,
x_label="foo", y_label="bar", plot_title="baz", cbar_label="quux", renamings=None):
length = len(posarray_garbage)
indices = list(range(len(candidates)))
indices.sort(key=lambda x: -lengths[rename_candidate(candidates[x])])
candidates.sort(key=lambda x: -lengths[rename_candidate(x)])
posarray_strong = [posarray_strong[idx] for idx in indices]
posarray_weak = [posarray_weak[idx] for idx in indices]
posarray_garbage = [posarray_garbage[idx] for idx in indices]
max_weak = 14 + 1#max([maximum_overlap(elem) + 1 for elem in posarray_weak])
max_strong = 7 + 1#max([maximum_overlap(elem) + 1 for elem in posarray_strong])
max_garbage = 29 + 1#max([maximum_overlap(elem) + 1 for elem in posarray_garbage])
length_total = max(map(lambda x: lengths[rename_candidate(x)], candidates))
fig, ax = plt.subplots(nrows=length, ncols=3, figsize=(width, height), sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0)
xticks = [idx for idx in range(0, length_total - 5, 10)]
yticks = [idx for idx in range(len(hlas) + 1)]
if length > 1:
ax[0, 0].set_xticks(xticks)
ax[0, 0].set_yticks(yticks)
ax[0, 0].set_yticklabels(["all"] + hlas)
else:
ax[0].set_xticks(xticks)
ax[0].set_yticks(yticks)
ax[0].set_yticklabels(["all"] + hlas)
maxval = 4
cmap_strong = matplotlib.cm.get_cmap(cmap_code)
cmap_strong = plottools.cmap_discretize(cmap_strong, max_strong)
cmap_strong.set_bad(color="#ccccccff")
cmap_weak = plottools.desaturate(matplotlib.cm.get_cmap(cmap_code))
cmap_garbage = plottools.desaturate(cmap_weak)
cmap_weak = plottools.cmap_discretize(cmap_weak, max_weak)
cmap_weak.set_bad(color="#ccccccff")
cmap_garbage = plottools.cmap_discretize(cmap_garbage, max_garbage)
cmap_garbage.set_bad(color="#ccccccff")
for idx in range(length):
for idy in range(3):
# if length > 1:
# pt = plt.text(0.95, 0.05, rename_candidate(candidates[idx], renamings=renamings)[:-3],
# transform=ax[idx, idy].transAxes,
# horizontalalignment="right",
# verticalalignment="bottom")
# else:
# pt = plt.text(0.95, 0.05, rename_candidate(candidates[idx], renamings=renamings)[:-3],
# transform=ax[idy].transAxes,
# horizontalalignment="right",
# verticalalignment="bottom")
# pt.set_fontsize(24)
# pt.set_fontweight("bold")
if length > 1:
ax[idx, idy].set_aspect("auto")
ax[idx, idy].axhline(0.5, color="black")
if idy == 0:
for label in ax[idx, idy].get_yticklabels():
label.set_fontsize(24)
label.set_fontweight("bold")
if idx == length - 1:
for label in ax[idx, idy].get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(24)
label.set_fontweight("bold")
else:
ax[idy].set_aspect("auto")
ax[idy].axhline(0.5, color="black")
if idy == 0:
for label in ax[idy].get_yticklabels():
label.set_fontsize(24)
label.set_fontweight("bold")
if idx == length - 1:
for label in ax[idy].get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(24)
label.set_fontweight("bold")
allhlas_strong = np.zeros(length_total)
allhlas_weak = np.zeros(length_total)
allhlas_garbage = np.zeros(length_total)
for row in hlas:
if row in posarray_strong[idx].keys():
posarray_strong[idx][row][lengths[rename_candidate(candidates[idx])]:] = float("nan")
else:
posarray_strong[idx][row] = np.zeros(length_total)
posarray_strong[idx][row][lengths[rename_candidate(candidates[idx])]:] = float("nan")
if row in posarray_weak[idx].keys():
posarray_weak[idx][row][lengths[rename_candidate(candidates[idx])]:] = float("nan")
else:
posarray_weak[idx][row] = np.zeros(length_total)
posarray_weak[idx][row][lengths[rename_candidate(candidates[idx])]:] = float("nan")
if row in posarray_garbage[idx].keys():
posarray_garbage[idx][row][lengths[rename_candidate(candidates[idx])]:] = float("nan")
else:
posarray_garbage[idx][row] = np.zeros(length_total)
posarray_garbage[idx][row][lengths[rename_candidate(candidates[idx])]:] = float("nan")
allhlas_strong += posarray_strong[idx][row] if row in posarray_strong[idx].keys() else np.zeros(length_total)
allhlas_weak += posarray_weak[idx][row] if row in posarray_weak[idx].keys() else np.zeros(length_total)
allhlas_garbage += posarray_garbage[idx][row] if row in posarray_garbage[idx].keys() else np.zeros(length_total)
array_strong = np.asarray([allhlas_strong] + [posarray_strong[idx][row] if row in posarray_strong[idx].keys() else np.zeros(length_total) for row in hlas])
array_weak = np.asarray([allhlas_weak] + [posarray_weak[idx][row] if row in posarray_weak[idx].keys() else np.zeros(length_total) for row in hlas])
array_garbage = np.asarray([allhlas_garbage] + [posarray_garbage[idx][row] if row in posarray_garbage[idx].keys() else np.zeros(length_total) for row in hlas])
if length > 1:
im_strong = ax[idx, 0].imshow(array_strong, cmap=cmap_strong, vmax=max_strong, vmin=0, aspect="auto")
im_weak = ax[idx, 1].imshow(array_weak, cmap=cmap_weak, vmax=max_weak, vmin=0, aspect="auto")
im_garbage = ax[idx, 2].imshow(array_garbage, cmap=cmap_garbage, vmax=max_garbage, vmin=0, aspect="auto")
else:
im_strong = ax[0].imshow(array_strong, cmap=cmap_strong, vmax=max_strong, vmin=0, aspect="auto")
im_weak = ax[1].imshow(array_weak, cmap=cmap_weak, vmax=max_weak, vmin=0, aspect="auto")
im_garbage = ax[2].imshow(array_garbage, cmap=cmap_garbage, vmax=max_garbage, vmin=0, aspect="auto")
plt.savefig(output_path, dpi=300)
plt.close()
def plot_complete_h(strong_posarray_m1, weak_posarray_m1, garbage_posarray_m1,
strong_posarray_m2, weak_posarray_m2, garbage_posarray_m2,
mutation_counts, length_total = 200,
width=40, height=40, output_path="outputimages/plot_complete.png",
x_label="foo", y_label="bar", renamings=None):
max_weak = max(maximum_overlap(weak_posarray_m1), maximum_overlap(weak_posarray_m2)) + 1
max_strong = max(maximum_overlap(strong_posarray_m1), maximum_overlap(strong_posarray_m2)) + 1
max_garbage = max(maximum_overlap(garbage_posarray_m1), maximum_overlap(garbage_posarray_m2)) + 1
cmap_m1 = matplotlib.cm.get_cmap("Magentas")
cmap_m1 = plottools.cmap_discretize(cmap_m1, max_strong)
cmap_m1.set_bad(color="#ccccccff")
cmap_weak_m1 = plottools.desaturate(matplotlib.cm.get_cmap("Magentas"))
cmap_garbage_m1 = plottools.desaturate(cmap_weak_m1)
cmap_weak_m1 = plottools.cmap_discretize(cmap_weak_m1, max_weak)
cmap_weak_m1.set_bad(color="#ccccccff")
cmap_garbage_m1 = plottools.cmap_discretize(cmap_garbage_m1, max_garbage)
cmap_garbage_m1.set_bad(color="#ccccccff")
cmap_m2 = matplotlib.cm.get_cmap("Greens")
cmap_m2 = plottools.cmap_discretize(cmap_m2, max_strong)
cmap_m2.set_bad(color="#ccccccff")
cmap_weak_m2 = plottools.desaturate(matplotlib.cm.get_cmap("Greens"))
cmap_garbage_m2 = plottools.desaturate(cmap_weak_m2)
cmap_weak_m2 = plottools.cmap_discretize(cmap_weak_m2, max_weak)
cmap_weak_m2.set_bad(color="#ccccccff")
cmap_garbage_m2 = plottools.cmap_discretize(cmap_garbage_m2, max_garbage)
cmap_garbage_m2.set_bad(color="#ccccccff")
candidates_garbage_m1 = [key for key in mutation_counts.keys() if key.endswith("_m1")]
candidates_garbage_m1.sort(key=lambda x: rename_candidate(x, renamings=renamings))
candidates_garbage_m2 = [key for key in mutation_counts.keys() if key.endswith("_m2")]
candidates_garbage_m2.sort(key=lambda x: rename_candidate(x, renamings=renamings))
array_strong_m1 = np.asarray([strong_posarray_m1[row] if row in strong_posarray_m1.keys() else np.zeros(length_total) for row in candidates_garbage_m1])
array_weak_m1 = np.asarray([weak_posarray_m1[row] if row in weak_posarray_m1.keys() else np.zeros(length_total) for row in candidates_garbage_m1])
array_garbage_m1 = np.asarray([garbage_posarray_m1[row] if row in garbage_posarray_m1.keys() else np.zeros(length_total) for row in candidates_garbage_m1])
array_strong_m2 = np.asarray([strong_posarray_m2[row] if row in strong_posarray_m2.keys() else np.zeros(length_total) for row in candidates_garbage_m2])
array_weak_m2 = np.asarray([weak_posarray_m2[row] if row in weak_posarray_m2.keys() else np.zeros(length_total) for row in candidates_garbage_m2])
array_garbage_m2 = np.asarray([garbage_posarray_m2[row] if row in garbage_posarray_m2.keys() else np.zeros(length_total) for row in candidates_garbage_m2])
xticks = [idx * 10 for idx in range(21)]
yticks = [idx for idx in range(len(candidates_garbage_m1))]
fig, ax = plt.subplots(nrows=2, ncols=8,
gridspec_kw = {
'height_ratios': [1, 80],
'width_ratios': [3, 3, 3, 1,
3, 3, 3, 1]
}, sharex=False, sharey=False,
figsize=(width, height))
fig.subplots_adjust(hspace=0, wspace=0)
# axis sharing and deletion
ax[1, 1].get_shared_y_axes().join(ax[1, 1], ax[1, 2])
ax[1, 2].get_shared_y_axes().join(ax[1, 2], ax[1, 3])
ax[1, 5].get_shared_y_axes().join(ax[1, 5], ax[1, 6])
ax[1, 6].get_shared_y_axes().join(ax[1, 6], ax[1, 7])
ax[0, 3].set_axis_off()
ax[0, 7].set_axis_off()
single_ax = ax[1, 0]
single_ax.set_aspect("auto")
single_ax.set_yticks(yticks)
for elem in candidates_garbage_m1:
print(rename_candidate(elem, renamings=renamings), renamings, elem)
single_ax.set_yticklabels(map(lambda x: x[:-3],
map(lambda x: rename_candidate(x, renamings=renamings), candidates_garbage_m1)))
for label in single_ax.get_yticklabels():
label.set_fontsize(24)
label.set_fontweight("bold")
for ax_idy in (list(range(3)) + list(range(4, 7))):
single_ax = ax[1, ax_idy]
single_ax.grid(True, which='major', axis='x', linestyle='--')
single_ax.set_aspect("auto")
if ax_idy > 0:
single_ax.set_yticks(yticks)
single_ax.set_yticklabels([])
single_ax.set_adjustable("box-forced")
single_ax.set_xticks(xticks)
for label in single_ax.get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(24)
label.set_fontweight("bold")
# Mutation counts:
ax[1, 3].barh(list(range(len(candidates_garbage_m1))), [200 * mutation_counts[key] if key in mutation_counts.keys() else 300.0 for key in candidates_garbage_m1], height=1.0, color="black")
ax[1, 7].barh(list(range(len(candidates_garbage_m2))), [200 * mutation_counts[key] if key in mutation_counts.keys() else 300.0 for key in candidates_garbage_m2], height=1.0, color="black")
ax[1, 3].set_aspect("auto")
ax[1, 3].set_xticks([20 * x for x in range(11)])
        ax[1, 3].set_xticklabels([f"{x * 0.1:.1f}" for x in range(11)])
ax[1, 3].set_yticks(yticks)
ax[1, 3].set_yticklabels([])
for label in ax[1, 3].get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(16)
label.set_fontweight("bold")
ax[1, 7].set_aspect("auto")
ax[1, 7].set_xticks([20 * x for x in range(11)])
ax[1, 7].set_xticklabels([f"{x * 0.1:.1f}" for x in range(11)])
ax[1, 7].set_yticks(yticks)
ax[1, 7].set_yticklabels([])
for label in ax[1, 7].get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(16)
label.set_fontweight("bold")
im_strong_m1 = ax[1, 0].imshow(array_strong_m1, cmap=cmap_m1, vmin=0.0, vmax=max_strong, aspect="auto")
im_weak_m1 = ax[1, 1].imshow(array_weak_m1, cmap=cmap_weak_m1, vmin=0.0, vmax=max_weak, aspect="auto")
im_garbage_m1 = ax[1, 2].imshow(array_garbage_m1, cmap=cmap_garbage_m1, vmin=0.0, vmax=max_garbage, aspect="auto")
im_strong_m2 = ax[1, 4].imshow(array_strong_m2, cmap=cmap_m2, vmin=0.0, vmax=max_strong, aspect="auto")
im_weak_m2 = ax[1, 5].imshow(array_weak_m2, cmap=cmap_weak_m2, vmin=0.0, vmax=max_weak, aspect="auto")
im_garbage_m2 = ax[1, 6].imshow(array_garbage_m2, cmap=cmap_garbage_m2, vmin=0.0, vmax=max_garbage, aspect="auto")
cbar_strong_m1 = fig.colorbar(im_strong_m1, cax=ax[0, 0], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_weak_m1 = fig.colorbar(im_weak_m1, cax=ax[0, 1], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_garbage_m1 = fig.colorbar(im_garbage_m1, cax=ax[0, 2], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_strong_m2 = fig.colorbar(im_strong_m2, cax=ax[0, 4], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_weak_m2 = fig.colorbar(im_weak_m2, cax=ax[0, 5], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_garbage_m2 = fig.colorbar(im_garbage_m2, cax=ax[0, 6], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_strong_m1.set_ticks([float(idx) for idx in range(max_strong)])
cbar_weak_m1.set_ticks([float(idx) for idx in range(0, max_weak, 2)])
cbar_garbage_m1.set_ticks([float(idx) for idx in range(0, max_garbage, 5)])
cbar_strong_m1.ax.set_xticklabels([idx for idx in range(max_strong)])
cbar_weak_m1.ax.set_xticklabels([idx for idx in range(0, max_weak, 2)])
cbar_garbage_m1.ax.set_xticklabels([idx for idx in range(0, max_garbage, 5)])
cbar_strong_m1.ax.xaxis.set_ticks_position("top")
cbar_weak_m1.ax.xaxis.set_ticks_position("top")
cbar_garbage_m1.ax.xaxis.set_ticks_position("top")
cbar_strong_m1.ax.xaxis.set_label_position("top")
cbar_weak_m1.ax.xaxis.set_label_position("top")
cbar_garbage_m1.ax.xaxis.set_label_position("top")
cbar_strong_m2.set_ticks([float(idx) for idx in range(max_strong)])
cbar_weak_m2.set_ticks([float(idx) for idx in range(0, max_weak, 2)])
cbar_garbage_m2.set_ticks([float(idx) for idx in range(0, max_garbage, 5)])
cbar_strong_m2.ax.set_xticklabels([idx for idx in range(max_strong)])
cbar_weak_m2.ax.set_xticklabels([idx for idx in range(0, max_weak, 2)])
cbar_garbage_m2.ax.set_xticklabels([idx for idx in range(0, max_garbage, 5)])
cbar_strong_m2.ax.xaxis.set_ticks_position("top")
cbar_weak_m2.ax.xaxis.set_ticks_position("top")
cbar_garbage_m2.ax.xaxis.set_ticks_position("top")
cbar_strong_m2.ax.xaxis.set_label_position("top")
cbar_weak_m2.ax.xaxis.set_label_position("top")
cbar_garbage_m2.ax.xaxis.set_label_position("top")
for label in cbar_weak_m1.ax.get_xticklabels():
label.set_fontsize(16)
label.set_fontweight("bold")
for label in cbar_strong_m1.ax.get_xticklabels():
label.set_fontsize(16)
label.set_fontweight("bold")
for label in cbar_garbage_m1.ax.get_xticklabels():
label.set_fontsize(16)
label.set_fontweight("bold")
for label in cbar_weak_m2.ax.get_xticklabels():
label.set_fontsize(16)
label.set_fontweight("bold")
for label in cbar_strong_m2.ax.get_xticklabels():
label.set_fontsize(16)
label.set_fontweight("bold")
for label in cbar_garbage_m2.ax.get_xticklabels():
label.set_fontsize(16)
label.set_fontweight("bold")
plt.savefig(output_path, dpi=300)
plt.close("all")
def plot_complete_strong_h(strong_posarray_m1, strong_posarray_m2,
mutation_counts, length_total = 200,
width=40, height=40, output_path="outputimages/plot_complete.png",
x_label="foo", y_label="bar", renamings=None):
max_strong = max(maximum_overlap(strong_posarray_m1), maximum_overlap(strong_posarray_m2)) + 1
cmap_m1 = matplotlib.cm.get_cmap("Magentas")
cmap_m1 = plottools.cmap_discretize(cmap_m1, max_strong)
cmap_m1.set_bad(color="#ccccccff")
cmap_m2 = matplotlib.cm.get_cmap("Greens")
cmap_m2 = plottools.cmap_discretize(cmap_m2, max_strong)
cmap_m2.set_bad(color="#ccccccff")
candidates_garbage_m1 = [key for key in mutation_counts.keys() if key.endswith("_m1")]
candidates_garbage_m1.sort(key=lambda x: rename_candidate(x, renamings=renamings))
candidates_garbage_m2 = [key for key in mutation_counts.keys() if key.endswith("_m2")]
candidates_garbage_m2.sort(key=lambda x: rename_candidate(x, renamings=renamings))
array_strong_m1 = np.asarray([strong_posarray_m1[row][:length_total] if row in strong_posarray_m1.keys() else np.zeros(length_total) for row in candidates_garbage_m1])
array_strong_m2 = np.asarray([strong_posarray_m2[row][:length_total] if row in strong_posarray_m2.keys() else np.zeros(length_total) for row in candidates_garbage_m2])
xticks = [idx * 10 for idx in range(21)]
yticks = [idx for idx in range(len(candidates_garbage_m1))]
fig, ax = plt.subplots(nrows=2, ncols=4,
gridspec_kw = {
'height_ratios': [1, 80],
'width_ratios': [3, 1,
3, 1]
}, sharex=False, sharey=False,
figsize=(width, height))
fig.subplots_adjust(hspace=0, wspace=0)
# axis sharing and deletion
ax[1, 1].get_shared_y_axes().join(ax[1, 1], ax[1, 2])
ax[1, 2].get_shared_y_axes().join(ax[1, 2], ax[1, 3])
ax[0, 1].set_axis_off()
ax[0, 3].set_axis_off()
single_ax = ax[1, 0]
single_ax.set_aspect("auto")
single_ax.set_yticks(yticks)
for elem in candidates_garbage_m1:
print(rename_candidate(elem, renamings=renamings), renamings, elem)
single_ax.set_yticklabels(map(lambda x: x[:-3],
map(lambda x: rename_candidate(x, renamings=renamings), candidates_garbage_m1)))
for label in single_ax.get_yticklabels():
label.set_fontsize(24)
label.set_fontweight("bold")
for ax_idy in [0, 2]:
single_ax = ax[1, ax_idy]
single_ax.grid(True, which='major', axis='x', linestyle='--')
single_ax.set_aspect("auto")
if ax_idy > 0:
single_ax.set_yticks(yticks)
single_ax.set_yticklabels([])
single_ax.set_adjustable("box-forced")
single_ax.set_xticks(xticks)
for label in single_ax.get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(24)
label.set_fontweight("bold")
# Mutation counts:
ax[1, 1].barh(list(range(len(candidates_garbage_m1))), [200 * mutation_counts[key] if key in mutation_counts.keys() else 300.0 for key in candidates_garbage_m1], height=1.0, color="black")
ax[1, 3].barh(list(range(len(candidates_garbage_m2))), [200 * mutation_counts[key] if key in mutation_counts.keys() else 300.0 for key in candidates_garbage_m2], height=1.0, color="black")
ax[1, 1].set_xticks([20 * x for x in range(11)])
        ax[1, 1].set_xticklabels([f"{x * 0.1:.1f}" for x in range(11)])
ax[1, 1].set_yticks(yticks)
ax[1, 1].set_yticklabels([])
for label in ax[1, 1].get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(16)
label.set_fontweight("bold")
ax[1, 3].set_aspect("auto")
ax[1, 3].set_xticks([20 * x for x in range(11)])
        ax[1, 3].set_xticklabels([f"{x * 0.1:.1f}" for x in range(11)])
ax[1, 3].set_yticks(yticks)
ax[1, 3].set_yticklabels([])
for label in ax[1, 3].get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(16)
label.set_fontweight("bold")
im_strong_m1 = ax[1, 0].imshow(array_strong_m1, cmap=cmap_m1, vmin=0.0, vmax=max_strong, aspect="auto")
im_strong_m2 = ax[1, 2].imshow(array_strong_m2, cmap=cmap_m2, vmin=0.0, vmax=max_strong, aspect="auto")
cbar_strong_m1 = fig.colorbar(im_strong_m1, cax=ax[0, 0], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_strong_m2 = fig.colorbar(im_strong_m2, cax=ax[0, 2], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_strong_m1.set_ticks([float(idx) for idx in range(max_strong)])
cbar_strong_m1.ax.set_xticklabels([idx for idx in range(max_strong)])
cbar_strong_m1.ax.xaxis.set_ticks_position("top")
cbar_strong_m1.ax.xaxis.set_label_position("top")
cbar_strong_m2.set_ticks([float(idx) for idx in range(max_strong)])
cbar_strong_m2.ax.set_xticklabels([idx for idx in range(max_strong)])
cbar_strong_m2.ax.xaxis.set_ticks_position("top")
cbar_strong_m2.ax.xaxis.set_label_position("top")
for label in cbar_strong_m1.ax.get_xticklabels():
label.set_fontsize(16)
label.set_fontweight("bold")
for label in cbar_strong_m2.ax.get_xticklabels():
label.set_fontsize(16)
label.set_fontweight("bold")
plt.savefig(output_path, dpi=300)
plt.close("all")
def plot_complete(strong_posarray_m1, weak_posarray_m1, garbage_posarray_m1,
strong_posarray_m2, weak_posarray_m2, garbage_posarray_m2,
mutation_counts, length_total = 200,
width=40, height=40, output_path="outputimages/plot_complete.png",
x_label="foo", y_label="bar"):
max_weak = max(maximum_overlap(weak_posarray_m1), maximum_overlap(weak_posarray_m2)) + 1
max_strong = max(maximum_overlap(strong_posarray_m1), maximum_overlap(strong_posarray_m2)) + 1
max_garbage = max(maximum_overlap(garbage_posarray_m1), maximum_overlap(garbage_posarray_m2)) + 1
cmap_m1 = matplotlib.cm.get_cmap("Magentas")
cmap_m1 = plottools.cmap_discretize(cmap_m1, max_strong)
cmap_m1.set_bad(color="#ccccccff")
cmap_weak_m1 = plottools.desaturate(matplotlib.cm.get_cmap("Magentas"))
cmap_garbage_m1 = plottools.desaturate(cmap_weak_m1)
cmap_weak_m1 = plottools.cmap_discretize(cmap_weak_m1, max_weak)
cmap_weak_m1.set_bad(color="#ccccccff")
cmap_garbage_m1 = plottools.cmap_discretize(cmap_garbage_m1, max_garbage)
cmap_garbage_m1.set_bad(color="#ccccccff")
cmap_m2 = matplotlib.cm.get_cmap("Greens")
cmap_m2 = plottools.cmap_discretize(cmap_m2, max_strong)
cmap_m2.set_bad(color="#ccccccff")
cmap_weak_m2 = plottools.desaturate(matplotlib.cm.get_cmap("Greens"))
cmap_garbage_m2 = plottools.desaturate(cmap_weak_m2)
cmap_weak_m2 = plottools.cmap_discretize(cmap_weak_m2, max_weak)
cmap_weak_m2.set_bad(color="#ccccccff")
cmap_garbage_m2 = plottools.cmap_discretize(cmap_garbage_m2, max_garbage)
cmap_garbage_m2.set_bad(color="#ccccccff")
candidates_garbage_m1 = [key for key in mutation_counts.keys() if key.endswith("_m1")]
candidates_garbage_m1.sort(key=lambda x: -sum([1.0 for elem in garbage_posarray_m1[x] if elem > -1.0]) if x in garbage_posarray_m1.keys() else 0.0)
candidates_garbage_m2 = [key for key in mutation_counts.keys() if key.endswith("_m2")]
candidates_garbage_m2.sort(key=lambda x: -sum([1.0 for elem in garbage_posarray_m1[x[:-3] + "_m1"] if elem > -1.0]) if x[:-3] + "_m1" in garbage_posarray_m1.keys() else 0.0)
array_strong_m1 = np.asarray([strong_posarray_m1[row] if row in strong_posarray_m1.keys() else np.zeros(length_total) for row in candidates_garbage_m1])
array_weak_m1 = np.asarray([weak_posarray_m1[row] if row in weak_posarray_m1.keys() else np.zeros(length_total) for row in candidates_garbage_m1])
array_garbage_m1 = np.asarray([garbage_posarray_m1[row] if row in garbage_posarray_m1.keys() else np.zeros(length_total) for row in candidates_garbage_m1])
array_strong_m2 = np.asarray([strong_posarray_m2[row] if row in strong_posarray_m2.keys() else np.zeros(length_total) for row in candidates_garbage_m2])
array_weak_m2 = np.asarray([weak_posarray_m2[row] if row in weak_posarray_m2.keys() else np.zeros(length_total) for row in candidates_garbage_m2])
array_garbage_m2 = np.asarray([garbage_posarray_m2[row] if row in garbage_posarray_m2.keys() else np.zeros(length_total) for row in candidates_garbage_m2])
xticks = [idx * 10 for idx in range(21)]
yticks = [idx for idx in range(len(candidates_garbage_m1))]
fig, ax = plt.subplots(nrows=4, ncols=4, gridspec_kw = {'height_ratios': [1, 1, 80, 80], 'width_ratios': [3, 3, 3, 1]}, sharex=False, sharey=False, figsize=(width, height))
fig.subplots_adjust(hspace=0, wspace=0)
# axis sharing and deletion
ax[2, 1].get_shared_y_axes().join(ax[2, 1], ax[2, 2])
ax[2, 2].get_shared_y_axes().join(ax[2, 2], ax[2, 3])
ax[3, 1].get_shared_y_axes().join(ax[3, 1], ax[3, 2])
ax[3, 2].get_shared_y_axes().join(ax[3, 2], ax[3, 3])
ax[0, 3].set_axis_off()
ax[1, 3].set_axis_off()
for ax_idx in range(2, 4):
single_ax = ax[ax_idx, 0]
single_ax.set_aspect("auto")
single_ax.set_yticks(yticks)
single_ax.set_yticklabels(map(lambda x: x[:-3], candidates_garbage_m1))
for label in single_ax.get_yticklabels():
label.set_fontsize(24)
label.set_fontweight("bold")
for ax_idy in range(3):
single_ax = ax[ax_idx, ax_idy]
single_ax.set_aspect("auto")
if ax_idy > 0:
single_ax.set_yticks(yticks)
single_ax.set_yticklabels([])
single_ax.set_adjustable("box-forced")
if ax_idx == 3:
single_ax.set_xticks(xticks)
for label in single_ax.get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(24)
label.set_fontweight("bold")
else:
plt.setp(single_ax.get_xticklabels(), visible=False)
# Mutation counts:
if ax_idx == 2:
ax[ax_idx, 3].barh(list(range(len(candidates_garbage_m1))), [200 * mutation_counts[key] if key in mutation_counts.keys() else 300.0 for key in candidates_garbage_m1], height=1.0, color="black")
else:
ax[ax_idx, 3].barh(list(range(len(candidates_garbage_m2))), [200 * mutation_counts[key] if key in mutation_counts.keys() else 300.0 for key in candidates_garbage_m2], height=1.0, color="black")
ax[ax_idx, 3].set_aspect("auto")
ax[ax_idx, 3].set_xticks([20 * x for x in range(11)])
ax[ax_idx, 3].set_xticklabels([f"{x * 0.1:.1f}" for x in range(11)])
ax[ax_idx, 3].set_yticks(yticks)
ax[ax_idx, 3].set_yticklabels([])
for label in ax[ax_idx, 3].get_xticklabels():
label.set_rotation(90)
label.set_horizontalalignment("center")
label.set_fontsize(24)
label.set_fontweight("bold")
im_strong_m1 = ax[2, 0].imshow(array_strong_m1, cmap=cmap_m1, vmin=0.0, vmax=max_strong, aspect="auto")
im_weak_m1 = ax[2, 1].imshow(array_weak_m1, cmap=cmap_weak_m1, vmin=0.0, vmax=max_weak, aspect="auto")
im_garbage_m1 = ax[2, 2].imshow(array_garbage_m1, cmap=cmap_garbage_m1, vmin=0.0, vmax=max_garbage, aspect="auto")
im_strong_m2 = ax[3, 0].imshow(array_strong_m2, cmap=cmap_m2, vmin=0.0, vmax=max_strong, aspect="auto")
im_weak_m2 = ax[3, 1].imshow(array_weak_m2, cmap=cmap_weak_m2, vmin=0.0, vmax=max_weak, aspect="auto")
im_garbage_m2 = ax[3, 2].imshow(array_garbage_m2, cmap=cmap_garbage_m2, vmin=0.0, vmax=max_garbage, aspect="auto")
cbar_strong_m1 = fig.colorbar(im_strong_m1, cax=ax[0, 0], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_weak_m1 = fig.colorbar(im_weak_m1, cax=ax[0, 1], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_garbage_m1 = fig.colorbar(im_garbage_m1, cax=ax[0, 2], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_strong_m2 = fig.colorbar(im_strong_m2, cax=ax[1, 0], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_weak_m2 = fig.colorbar(im_weak_m2, cax=ax[1, 1], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
cbar_garbage_m2 = fig.colorbar(im_garbage_m2, cax=ax[1, 2], orientation = "horizontal", ticks=[0, 1, 2, 3, 4])
# colorbars
cbar_strong_m2.ax.set_xticklabels([])
cbar_weak_m2.ax.set_xticklabels([])
cbar_garbage_m2.ax.set_xticklabels([])
cbar_strong_m1.set_ticks([float(idx) for idx in range(max_strong)])
cbar_weak_m1.set_ticks([float(idx) for idx in range(0, max_weak, 2)])
cbar_garbage_m1.set_ticks([float(idx) for idx in range(0, max_garbage, 5)])
cbar_strong_m1.ax.set_xticklabels([idx for idx in range(max_strong)])
cbar_weak_m1.ax.set_xticklabels([idx for idx in range(0, max_weak, 2)])
cbar_garbage_m1.ax.set_xticklabels([idx for idx in range(0, max_garbage, 5)])
cbar_strong_m1.ax.xaxis.set_ticks_position("top")
cbar_weak_m1.ax.xaxis.set_ticks_position("top")
cbar_garbage_m1.ax.xaxis.set_ticks_position("top")
cbar_strong_m1.ax.xaxis.set_label_position("top")
cbar_weak_m1.ax.xaxis.set_label_position("top")
cbar_garbage_m1.ax.xaxis.set_label_position("top")
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size = "3%", pad = 0.2)
# divider.set_aspect(False)
# cax.set_aspect("auto")
# cbar = fig.colorbar(im, cax = cax, orientation = "vertical")
# cbar.ax.tick_params(labelsize=32)
# cbar.set_label(cbar_label, labelpad=20, fontsize=40)
# plt.tight_layout()
plt.savefig(output_path, dpi=300)
plt.close("all")
def nan_post_length(posarray, lengths):
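        """Mask positions beyond each candidate's sequence length with NaN."""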
result = {}
for name in lengths.keys():
if name in posarray.keys():
row = posarray[name]
else:
row = np.zeros(200)
row[lengths[name]:] = float("nan")
result[name] = row
return result
def plot_v2():
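        """Load gene renamings and the NetMHCPan strong/weak binder tables used for the combined epitope plots."""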
from excel_processing import read_mutation_files
renamings = parse_renamings("Renamings.csv", delimiter=",")
data_strong = read_data(Path("./NetMHCPanOutput/cMNR_peptides.csv.strong.csv"), delimiter=",")
data_weak = read_data(Path("./NetMHCPanOutput/cMNR_peptides.csv.weak.csv"), delimiter=",")
data_weak = | pd.concat([data_strong, data_weak]) | pandas.concat |
#test dataset model
from deepforest import get_data
from deepforest import dataset
from deepforest import utilities
import os
import pytest
import torch
import pandas as pd
import numpy as np
import tempfile
def single_class():
csv_file = get_data("example.csv")
return csv_file
def multi_class():
csv_file = get_data("testfile_multi.csv")
return csv_file
@pytest.mark.parametrize("csv_file,label_dict",[(single_class(), {"Tree":0}), (multi_class(),{"Alive":0,"Dead":1})])
def test_TreeDataset(csv_file, label_dict):
root_dir = os.path.dirname(get_data("OSBS_029.png"))
ds = dataset.TreeDataset(csv_file=csv_file,
root_dir=root_dir,
label_dict=label_dict)
raw_data = pd.read_csv(csv_file)
assert len(ds) == len(raw_data.image_path.unique())
for i in range(len(ds)):
#Between 0 and 1
path, image, targets = ds[i]
assert image.max() <= 1
assert image.min() >= 0
assert targets["boxes"].shape == (raw_data.shape[0],4)
assert targets["labels"].shape == (raw_data.shape[0],)
assert len(np.unique(targets["labels"])) == len(raw_data.label.unique())
def test_single_class_with_empty(tmpdir):
"""Add fake empty annotations to test parsing """
csv_file1 = get_data("example.csv")
csv_file2 = get_data("OSBS_029.csv")
df1 = pd.read_csv(csv_file1)
df2 = pd.read_csv(csv_file2)
df = pd.concat([df1,df2])
df.loc[df.image_path == "OSBS_029.tif","xmin"] = 0
df.loc[df.image_path == "OSBS_029.tif","ymin"] = 0
df.loc[df.image_path == "OSBS_029.tif","xmax"] = 0
df.loc[df.image_path == "OSBS_029.tif","ymax"] = 0
df.to_csv("{}_test_empty.csv".format(tmpdir))
root_dir = os.path.dirname(get_data("OSBS_029.png"))
ds = dataset.TreeDataset(csv_file="{}_test_empty.csv".format(tmpdir),
root_dir=root_dir,
label_dict={"Tree":0})
assert len(ds) == 2
#First image has annotations
assert not torch.sum(ds[0][2]["boxes"]) == 0
#Second image has no annotations
assert torch.sum(ds[1][2]["boxes"]) == 0
@pytest.mark.parametrize("augment",[True,False])
def test_TreeDataset_transform(augment):
csv_file = get_data("example.csv")
root_dir = os.path.dirname(csv_file)
ds = dataset.TreeDataset(csv_file=csv_file,
root_dir=root_dir,
transforms=dataset.get_transform(augment=augment))
for i in range(len(ds)):
#Between 0 and 1
path, image, targets = ds[i]
assert image.max() <= 1
assert image.min() >= 0
assert targets["boxes"].shape == (79, 4)
assert targets["labels"].shape == (79,)
assert torch.is_tensor(targets["boxes"])
assert torch.is_tensor(targets["labels"])
assert torch.is_tensor(image)
def test_collate():
"""Due to data augmentations the dataset class may yield empty bounding box annotations"""
csv_file = get_data("example.csv")
root_dir = os.path.dirname(csv_file)
ds = dataset.TreeDataset(csv_file=csv_file,
root_dir=root_dir,
transforms=dataset.get_transform(augment=False))
for i in range(len(ds)):
#Between 0 and 1
batch = ds[i]
collated_batch = utilities.collate_fn(batch)
assert len(collated_batch) == 2
def test_empty_collate():
"""Due to data augmentations the dataset class may yield empty bounding box annotations"""
csv_file = get_data("example.csv")
root_dir = os.path.dirname(csv_file)
ds = dataset.TreeDataset(csv_file=csv_file,
root_dir=root_dir,
transforms=dataset.get_transform(augment=False))
for i in range(len(ds)):
#Between 0 and 1
batch = ds[i]
collated_batch = utilities.collate_fn([None, batch, batch])
        assert len(collated_batch[0]) == 2
def test_predict_dataloader():
csv_file = get_data("example.csv")
root_dir = os.path.dirname(csv_file)
ds = dataset.TreeDataset(csv_file=csv_file,
root_dir=root_dir,
train=False)
image = next(iter(ds))
#Assert image is channels first format
assert image.shape[0] == 3
def test_multi_image_warning():
tmpdir = tempfile.gettempdir()
csv_file1 = get_data("example.csv")
csv_file2 = get_data("OSBS_029.csv")
df1 = pd.read_csv(csv_file1)
df2 = | pd.read_csv(csv_file2) | pandas.read_csv |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = | pd.read_csv(path) | pandas.read_csv |
import os
from matplotlib import pyplot as plt
import pandas_datareader.data as data
#os.chdir(r"D:\githubby\StockAnalysis")
from Baseutils import Baseutils
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
import math
import numpy as np
from datetime import datetime, date, timedelta
Util = Baseutils()
prices = Util.stksearch2p("TSLA",Util.start,Util.end)
df = Util.stksearch("TSLA",Util.start, Util.end)
class LRshift_N():
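    """Next-day "Adj Close" forecaster built from lagged (shifted) price features."""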
    def __init__(self, name):  # TODO: clarify what self.scalar2 is for
self.name = name
self.utility = Baseutils()
self.prices = Util.stksearch2p(self.name,self.utility.start,self.utility.end)
self.df = Util.stksearch(self.name,self.utility.start,self.utility.end)
print("latest data", self.df.iloc[-1:,0:0])
self.n = 10
self.scalar_ = StandardScaler()
self.scalar2 = StandardScaler()
        self.lags = 75  # 200 for Nikkei, 75 for Tesla; not sure.
def acorr_Close(self):
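        """Build lag features from the close series, fit a model on them, and predict from today's row."""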
df = self.df
newone = self.shifting(df,self.lags)
newone = newone.drop(["High","Low","Open","Close","Volume"],axis = 1)
today = newone.iloc[-1:,:].drop(["next day"],axis=1).values #Today's price. Generally useful.
print(today,"today after generated")
newone = newone.dropna()
X_train, X_test, Y_train, Y_test, today = self.dataprocessing2(newone,today)
model = self.learning2(X_train, X_test, Y_train, Y_test)
predicted = model.predict(today)
return model, today, predicted
def shifting(self,df,n):
"""
input data and lag to make a shifted data.
"""
for t in range(1,n):
if t == 1:
a = df["Adj Close"].shift(+t).to_frame()
a.columns = ["t_{}".format(t)]
newone = pd.concat([df,a],axis = 1)
else:
a = df["Adj Close"].shift(+t).to_frame()
a.columns = ["t_{}".format(t)]
newone = pd.concat([newone,a],axis = 1)
a = df["Adj Close"].shift(-1).to_frame()
a.columns = ["next day"]
newone = | pd.concat([newone,a],axis = 1) | pandas.concat |
import time
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold
from tqdm import tqdm
def timeit(method):
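    """Decorator that reports a function's runtime in milliseconds (printed, or stored in kw['log_time'])."""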
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
return result
return timed
@timeit
def fea_date_time(df):
date_org = df['date']
df['date'] = df['date'].astype(str)
df["date"] = df["date"].apply(lambda x: x[:4] + "-" + x[4:6] + "-" + x[6:])
df["date"] = pd.to_datetime(df["date"])
# df["year"] = df['date'].dt.year
df["month"] = df['date'].dt.month
df["day"] = df['date'].dt.day
df['hour'] = pd.to_datetime(df['visitStartTime'], unit='s').dt.hour # aiden
# df["weekofmonth"] = df['day'].astype(int) // 7 # aiden
df["weekday"] = df['date'].dt.weekday
# df['weekofyear'] = df['date'].dt.weekofyear
df['month_unique_user_count'] = df.groupby('month')['fullVisitorId'].transform('nunique')
# df['month_unique_s_count'] = df.groupby('month')['sessionId'].transform('nunique')
# df['day_unique_user_count'] = df.groupby('day')['fullVisitorId'].transform('nunique')
# df['day_unique_s_count'] = df.groupby('day')['sessionId'].transform('nunique')
df['weekday_unique_user_count'] = df.groupby('weekday')['fullVisitorId'].transform('nunique')
# df['weekday_unique_s_count'] = df.groupby('weekday')['sessionId'].transform('nunique')
df['hour_unique_user_count'] = df.groupby('hour')['fullVisitorId'].transform('nunique') # aiden
# df['hour_unique_s_count'] = df.groupby('hour')['sessionId'].transform('nunique') # aiden
df['hour_unique_user_count'] = df.groupby('hour')['fullVisitorId'].transform('nunique')
df['user_hour_mean'] = df.groupby(['fullVisitorId'])['hour'].transform('mean') # aiden
df['user_hour_max'] = df.groupby(['fullVisitorId'])['hour'].transform('max') # aiden
df['user_hour_min'] = df.groupby(['fullVisitorId'])['hour'].transform('min') # aiden
# df['user_hour_var'] = df.groupby(['fullVisitorId'])['hour'].transform('var') # aiden
# df['user_hour_max-min'] = df['user_hour_max'] - df['user_hour_min'] # aiden
# df['user_weekday_hour_mean'] = df.groupby(['fullVisitorId', 'weekday'])['hour'].transform('mean') # aiden
df['date'] = date_org
return df
@timeit
def fea_format(df):
for col in ['visitNumber', 'totals_hits', 'totals_pageviews']:
df[col] = df[col].astype(float)
df['trafficSource_adwordsClickInfo.isVideoAd'].fillna(True, inplace=True)
df['trafficSource_isTrueDirect'].fillna(False, inplace=True)
return df
@timeit
def fea_device(df):
df['browser_category'] = df['device_browser'] + '_' + df['device_deviceCategory']
df['browser_operatingSystem'] = df['device_browser'] + '_' + df['device_operatingSystem']
df['mean_hour_per_browser_operatingSystem'] = df.groupby('browser_operatingSystem')['hour'].transform(
'mean') # aiden
df['source_country'] = df['trafficSource_source'] + '_' + df['geoNetwork_country']
return df
@timeit
def fea_totals(df):
df['visitNumber'] = np.log1p(df['visitNumber'])
df['totals_hits'] = np.log1p(df['totals_hits'])
df['totals_pageviews'] = np.log1p(df['totals_pageviews'].fillna(0))
# df['totals_pageviews_hit_rate'] = df['totals_hits'] - df['totals_pageviews']
# df['mean_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('mean')
df['sum_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('sum')
df['max_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('max')
# df['min_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('min')
df['var_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('var')
df['mean_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('mean') # aiden
df['sum_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('sum') # aiden
df['max_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('max') # aiden
# df['min_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('min') # aiden
# df['var_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('var') # aiden
return df
@timeit
def fea_geo_network(df):
# df['sum_pageviews_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform('sum')
# df['count_pageviews_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform(
# 'count')
df['mean_pageviews_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform(
'mean')
df['sum_hits_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('sum')
# df['count_hits_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('count')
# df['mean_hits_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('mean')
return df
@timeit
def fea_traffic_source(df):
df['campaign_medium'] = df['trafficSource_campaign'] + '_' + df['trafficSource_medium']
df['medium_hits_mean'] = df.groupby(['trafficSource_medium'])['totals_hits'].transform('mean')
df['medium_hits_max'] = df.groupby(['trafficSource_medium'])['totals_hits'].transform('max')
df['medium_hits_min'] = df.groupby(['trafficSource_medium'])['totals_hits'].transform('min')
df['medium_hits_sum'] = df.groupby(['trafficSource_medium'])['totals_hits'].transform('sum')
return df
@timeit
def fea_shift(df): # aiden
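    # Per-visitor lag features: previous visit timestamps (log-diffs) plus prior hits/pageviews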
df_g_sorted = df.sort_values(['visitStartTime'], ascending=True).groupby(['fullVisitorId'])
df['visitStartTime_b1'] = df_g_sorted['visitStartTime'].shift(1)
df.loc[df['visitStartTime_b1'].isnull(), 'visitStartTime_b1'] = df['visitStartTime']
df['visitStartTime_b2'] = df_g_sorted['visitStartTime'].shift(2)
df.loc[df['visitStartTime_b2'].isnull(), 'visitStartTime_b2'] = df['visitStartTime_b1']
df['visitStartTime_b1_diff'] = np.log1p(df['visitStartTime'] - df['visitStartTime_b1'])
df['visitStartTime_b2_diff'] = np.log1p(df['visitStartTime_b1'] - df['visitStartTime_b2'])
df.drop(['visitStartTime_b1'], axis=1, inplace=True)
df.drop(['visitStartTime_b2'], axis=1, inplace=True)
df['totals_hits_b1'] = df_g_sorted['totals_hits'].shift(1).fillna(0)
df['totals_pageviews_b1'] = df_g_sorted['totals_pageviews'].shift(1).fillna(0)
return df
def get_features(df):
org_cols = df.columns
df = fea_date_time(df)
df = fea_format(df)
df = fea_device(df)
df = fea_totals(df)
df = fea_geo_network(df)
df = fea_traffic_source(df)
df = fea_shift(df)
fea_cols = list(set(df.columns) - set(org_cols))
# print(new_cols)
return df, fea_cols
@timeit
def encode_label(df_train, df_test, categorical_feature):
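    """Factorize each categorical column over train+test jointly so integer labels stay consistent."""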
print(categorical_feature)
df_merge = pd.concat([df_train[categorical_feature], df_test[categorical_feature]])
train_size = df_train.shape[0]
for c in tqdm(categorical_feature):
# st = time.time()
labels, _ = pd.factorize(df_merge[c].values.astype('str'))
df_train[c] = labels[:train_size]
df_test[c] = labels[train_size:]
# print(c, time.time() - st)
return df_train, df_test
@timeit
def encode_frequency(df_train, df_test, categorical_feature):
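    """Add a <col>_Frequency feature holding each category's relative frequency in train+test."""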
df_merge = pd.concat([df_train[categorical_feature], df_test[categorical_feature]])
for col in tqdm(categorical_feature):
freq_col = '{}_Frequency'.format(col)
df_freq = df_merge.groupby([col]).size() / df_merge.shape[0]
df_freq = df_freq.reset_index().rename(columns={0: freq_col})
if freq_col in df_train.columns:
df_train.drop(freq_col, axis=1, inplace=True)
if freq_col in df_test.columns:
df_test.drop(freq_col, axis=1, inplace=True)
df_train = df_train.merge(df_freq, on=col, how='left')
df_test = df_test.merge(df_freq, on=col, how='left')
print(df_train.shape, df_test.shape)
return df_train, df_test
@timeit
def encode_mean_k_fold(df_train, df_test, categorical_feature, target_col):
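    """Smoothed target-mean encoding, computed out-of-fold on train to limit target leakage."""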
def _encode(col, alpha):
target_mean_global = df_train[target_col].mean()
nrows_cat = df_train.groupby(col)[target_col].count()
target_means_cats = df_train.groupby(col)[target_col].mean()
target_means_cats_adj = (target_means_cats * nrows_cat +
target_mean_global * alpha) / (nrows_cat + alpha)
# Mapping means to test data
encoded_col_test = df_test[col].map(target_means_cats_adj)
kfold = KFold(n_splits=5, shuffle=True, random_state=1989)
parts = []
for trn_inx, val_idx in kfold.split(df_train):
df_for_estimation, df_estimated = df_train.iloc[trn_inx], df_train.iloc[val_idx]
nrows_cat = df_for_estimation.groupby(col)[target_col].count()
target_means_cats = df_for_estimation.groupby(col)[target_col].mean()
target_means_cats_adj = (target_means_cats * nrows_cat +
target_mean_global * alpha) / (nrows_cat + alpha)
encoded_col_train_part = df_estimated[col].map(target_means_cats_adj)
parts.append(encoded_col_train_part)
encoded_col_train = pd.concat(parts, axis=0)
encoded_col_train.fillna(target_mean_global, inplace=True)
encoded_col_train.sort_index(inplace=True)
return encoded_col_train, encoded_col_test
for col in tqdm(categorical_feature):
temp_encoded_tr, temp_encoded_te = _encode(col, 5)
new_feat_name = 'mean_k_fold_{}'.format(col)
df_train[new_feat_name] = temp_encoded_tr.values
df_test[new_feat_name] = temp_encoded_te.values
print(df_train.shape, df_test.shape)
print(df_train.columns)
return df_train, df_test
@timeit
def encode_lda(df_train, df_test, categorical_feature, y_categorized, n_components=10, fea_name='lda'):
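    """Project the categorical block onto LDA components fitted against a categorized target."""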
print('lda_{}_0to{}'.format(fea_name, n_components - 1))
clf = LinearDiscriminantAnalysis(n_components=n_components)
df_merge = pd.concat([df_train[categorical_feature], df_test[categorical_feature]])
clf.fit(df_merge[categorical_feature], y_categorized)
df_train_lda = pd.DataFrame(clf.transform(df_train[categorical_feature]))
df_test_lda = pd.DataFrame(clf.transform(df_test[categorical_feature]))
col_map = {i: 'lda_{}_{}'.format(fea_name, i) for i in range(n_components)}
df_train_lda.rename(columns=col_map, inplace=True)
df_test_lda.rename(columns=col_map, inplace=True)
for c in col_map:
if c in df_train.columns:
df_train.drop(c, axis=1, inplace=True)
if c in df_test.columns:
df_test.drop(c, axis=1, inplace=True)
df_train = | pd.concat([df_train, df_train_lda], axis=1) | pandas.concat |
import pytest
from sklearn_pandas.transformers.row_filter import *
import pandas as pd
def test_no_missing_DropNARowFilter():
X = | pd.DataFrame({'A': [1, 2, 3]}) | pandas.DataFrame |
from collections import defaultdict
from dataclasses import asdict, dataclass, fields
from datetime import datetime
from itertools import chain
from typing import Generator, Iterable, List
import pandas as pd
@dataclass(frozen=True)
class Account:
identifier: str
username: str
full_name: str
centrality: float = None
date_scraped: datetime = None
def account_from_obj(obj):
return Account(
identifier=obj.identifier,
username=obj.username,
full_name=obj.full_name,
centrality=getattr(obj, 'centrality', None),
)
def account_to_camel_case(account: Account) -> dict:
return {
'identifier': account.identifier,
'username': account.username,
'fullName': account.full_name,
}
@dataclass(frozen=True)
class AccountDetails:
identifier: str
username: str
full_name: str
profile_pic_url: str
profile_pic_url_hd: str
biography: str
external_url: str
follows_count: int
followed_by_count: int
media_count: int
is_private: bool
is_verified: bool
country_block: bool
has_channel: bool
highlight_reel_count: bool
is_business_account: bool
is_joined_recently: bool
business_category_name: str
business_email: str
business_phone_number: str
business_address_json: str
connected_fb_page: str
centrality: float = None
date_scraped: datetime = None
def account_details_from_obj(obj):
return AccountDetails(
identifier=obj.identifier,
username=obj.username,
full_name=obj.full_name,
profile_pic_url=obj.profile_pic_url,
profile_pic_url_hd=obj.profile_pic_url_hd,
biography=obj.biography,
external_url=obj.external_url,
follows_count=obj.follows_count,
followed_by_count=obj.followed_by_count,
media_count=obj.media_count,
is_private=obj.is_private,
is_verified=obj.is_verified,
country_block=obj.country_block,
has_channel=obj.has_channel,
highlight_reel_count=obj.highlight_reel_count,
is_business_account=obj.is_business_account,
is_joined_recently=obj.is_joined_recently,
business_category_name=obj.business_category_name,
business_email=obj.business_email,
business_phone_number=obj.business_phone_number,
business_address_json=obj.business_address_json,
connected_fb_page=obj.connected_fb_page,
)
def accounts_from_dataframe(
data: pd.DataFrame
) -> Generator[Account, None, None]:
for row in data.itertuples(index=False):
row_data = row._asdict()
# Dealing with Pandas NaT values
if | pd.isnull(row_data["date_scraped"]) | pandas.isnull |
import numpy as np
import pandas as pd
from TACT.computation.adjustments import Adjustments, empirical_stdAdjustment
from TACT.computation.ml import machine_learning_TI
def perform_SS_NN_adjustment(inputdata):
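    """Split the data and assemble feature frames for the site-specific NN TI adjustment."""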
inputdata_test_result = pd.DataFrame()
results = pd.DataFrame(
columns=[
"sensor",
"height",
"adjustment",
"m",
"c",
"rsquared",
"difference",
"mse",
"rmse",
]
)
inputdata_train = inputdata[inputdata["split"] == True].copy()
inputdata_test = inputdata[inputdata["split"] == False].copy()
adj = Adjustments()
if inputdata.empty or len(inputdata) < 2:
results = adj.post_adjustment_stats([None], results, "Ref_TI", "adjTI_RSD_TI")
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
results = adj.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
results = adj.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
results = adj.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
results = adj.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
m = np.NaN
c = np.NaN
inputdata = False
else:
all_train = pd.DataFrame()
all_train["y_train"] = inputdata_train["Ref_TI"].copy()
all_train["x_train_TI"] = inputdata_train["RSD_TI"].copy()
all_train["x_train_TKE"] = inputdata_train["RSD_LidarTKE_class"].copy()
all_train["x_train_WS"] = inputdata_train["RSD_WS"].copy()
all_train["x_train_DIR"] = inputdata_train["RSD_Direction"].copy()
all_train["x_train_Hour"] = inputdata_train["Hour"].copy()
all_train["x_train_TEMP"] = inputdata_train["Temp"].copy()
all_train["x_train_HUM"] = inputdata_train["Humidity"].copy()
all_train["x_train_SD"] = inputdata_train["SD"].copy()
all_train["x_train_Tshift1"] = inputdata_train["x_train_Tshift1"].copy()
all_train["x_train_Tshift2"] = inputdata_train["x_train_Tshift3"].copy()
all_train["x_train_Tshift3"] = inputdata_train["x_train_Tshift3"].copy()
all_test = | pd.DataFrame() | pandas.DataFrame |
"""
Copyright 2022 HSBC Global Asset Management (Deutschland) GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
import pyratings as rtg
from tests import conftest
# --- input: single score/warf
@pytest.mark.parametrize(
["rating_provider", "score", "rating"],
list(
pd.concat(
[
conftest.scores_df_long,
conftest.rtg_df_long["rating"],
],
axis=1,
).to_records(index=False)
),
)
def test_get_rating_from_single_score_longterm(rating_provider, score, rating):
"""Tests if function can handle single string objects."""
act = rtg.get_ratings_from_scores(
rating_scores=score, rating_provider=rating_provider, tenor="long-term"
)
assert act == rating
def test_get_rating_from_single_score_float_longterm():
assert (
rtg.get_ratings_from_scores(
rating_scores=5.499, rating_provider="Fitch", tenor="long-term"
)
== "A+"
)
assert (
rtg.get_ratings_from_scores(
rating_scores=5.501, rating_provider="Fitch", tenor="long-term"
)
== "A"
)
@pytest.mark.parametrize(
["rating_provider", "score", "rating"],
list(
pd.concat(
[
conftest.scores_df_long_st,
conftest.rtg_df_long_st["rating"],
],
axis=1,
).to_records(index=False)
),
)
def test_get_rating_from_single_score_shortterm(rating_provider, score, rating):
"""Tests if function can handle single string objects."""
act = rtg.get_ratings_from_scores(
rating_scores=score, rating_provider=rating_provider, tenor="short-term"
)
assert act == rating
def test_get_rating_from_single_score_float_shortterm():
assert (
rtg.get_ratings_from_scores(
rating_scores=5.499, rating_provider="DBRS", tenor="short-term"
)
== "R-2 (high)"
)
assert (
rtg.get_ratings_from_scores(
rating_scores=5.501, rating_provider="DBRS", tenor="short-term"
)
== "R-2 (mid)"
)
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_ratings_from_single_score_invalid_rating_provider(tenor):
"""Tests if correct error message will be raised."""
with pytest.raises(AssertionError) as err:
rtg.get_ratings_from_scores(
rating_scores=10, rating_provider="foo", tenor=tenor
)
assert str(err.value) == conftest.ERR_MSG
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_ratings_with_invalid_single_score(tenor):
"""Tests if function returns NaN for invalid inputs."""
act = rtg.get_ratings_from_scores(
rating_scores=-5, rating_provider="Fitch", tenor=tenor
)
assert pd.isna(act)
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_ratings_with_single_score_and_no_rating_provider(tenor):
"""Tests if correct error message will be raised."""
with pytest.raises(ValueError) as err:
rtg.get_ratings_from_scores(rating_scores=-5, tenor=tenor)
assert str(err.value) == "'rating_provider' must not be None."
@pytest.mark.parametrize(
"warf, rating_provider, rating",
[
(1, "SP", "AAA"),
(455, "SP", "BBB"),
(484.9999, "SP", "BBB"),
(485, "Moody", "Baa3"),
(9999, "Moody's", "C"),
(10000, "Fitch", "D"),
],
)
def test_get_ratings_from_single_warf(warf, rating_provider, rating):
"""Tests if function can correctly handle individual warf (float)."""
act = rtg.get_ratings_from_warf(warf=warf, rating_provider=rating_provider)
assert act == rating
def test_get_ratings_from_single_warf_with_no_rating_provider():
"""Tests if correct error message will be raised."""
with pytest.raises(ValueError) as err:
rtg.get_ratings_from_warf(warf=100, rating_provider=None)
assert str(err.value) == "'rating_provider' must not be None."
@pytest.mark.parametrize("warf", [np.nan, -5, 20000])
def test_get_ratings_from_invalid_single_warf(warf):
"""Tests if function returns NaN for invalid inputs."""
assert pd.isna(rtg.get_ratings_from_warf(warf=warf, rating_provider="DBRS"))
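# Quick reference sketch (mirrors the parametrized cases above; only the values
# listed in the parametrize decorators are actually asserted):
#     rtg.get_ratings_from_warf(warf=455, rating_provider="SP")        # -> "BBB"
#     rtg.get_ratings_from_warf(warf=9999, rating_provider="Moody's")  # -> "C"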
# --- input: ratings score series
@pytest.mark.parametrize(
["rating_provider", "scores_series", "ratings_series"],
conftest.params_provider_scores_ratings_lt,
)
def test_get_ratings_from_scores_series_longterm(
rating_provider, scores_series, ratings_series
):
"""Tests if function can correctly handle pd.Series objects."""
act = rtg.get_ratings_from_scores(
rating_scores=scores_series, rating_provider=rating_provider
)
ratings_series.name = f"rtg_{rating_provider}"
assert_series_equal(act, ratings_series)
@pytest.mark.parametrize(
["rating_provider", "scores_series", "ratings_series"],
conftest.params_provider_scores_ratings_lt,
)
def test_get_ratings_from_scores_series_longterm_float(
rating_provider, scores_series, ratings_series
):
"""Tests if function can correctly handle pd.Series objects."""
act = rtg.get_ratings_from_scores(
rating_scores=scores_series.add(0.23), rating_provider=rating_provider
)
ratings_series.name = f"rtg_{rating_provider}"
assert_series_equal(act, ratings_series)
@pytest.mark.parametrize(
["rating_provider", "scores_series", "ratings_series"],
conftest.params_provider_scores_ratings_st,
)
def test_get_ratings_from_scores_series_shortterm(
rating_provider, scores_series, ratings_series
):
"""Tests if function can correctly handle pd.Series objects."""
act = rtg.get_ratings_from_scores(
rating_scores=scores_series, rating_provider=rating_provider, tenor="short-term"
)
ratings_series.name = f"rtg_{rating_provider}"
assert_series_equal(act, ratings_series)
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_ratings_from_scores_series_invalid_rating_provider(tenor):
"""Tests if correct error message will be raised."""
with pytest.raises(AssertionError) as err:
rtg.get_ratings_from_scores(
rating_scores=pd.Series(data=[1, 3, 22], name="rtg_score"),
rating_provider="foo",
tenor=tenor,
)
assert str(err.value) == conftest.ERR_MSG
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_ratings_from_scores_series_with_no_rating_provider(tenor):
"""Tests if correct error message will be raised."""
with pytest.raises(AssertionError) as err:
rtg.get_ratings_from_scores(
rating_scores=pd.Series(data=[1, 3, 22], name="foo"),
tenor=tenor,
)
assert str(err.value) == conftest.ERR_MSG
@pytest.mark.parametrize("tenor", ["long-term", "short-term"])
def test_get_ratings_from_invalid_scores_series(tenor):
"""Tests if function can correctly handle pd.Series objects."""
scores_series = pd.Series(data=[np.nan, "foo", -10], name="rtg_score")
ratings_series = pd.Series(data=[np.nan, np.nan, np.nan], name="rating")
act = rtg.get_ratings_from_scores(
rating_scores=scores_series, rating_provider="Fitch", tenor=tenor
)
ratings_series.name = "rtg_Fitch"
assert_series_equal(act, ratings_series, check_dtype=False)
@pytest.mark.parametrize(
["rating_provider", "warf_series", "ratings_series"],
conftest.params_provider_warf_ratings,
)
def test_get_ratings_from_warf_series(rating_provider, warf_series, ratings_series):
"""Tests if function can correctly handle pd.Series objects."""
act = rtg.get_ratings_from_warf(warf=warf_series, rating_provider=rating_provider)
ratings_series.name = f"rtg_{rating_provider}"
assert_series_equal(act, ratings_series)
def test_get_ratings_from_invalid_warf_series():
"""Tests if function can correctly handle pd.Series objects."""
warf_series = pd.Series(data=[np.nan, "foo", -10], name="rtg_score")
ratings_series = pd.Series(data=[np.nan, np.nan, np.nan], name="rating")
act = rtg.get_ratings_from_warf(warf=warf_series, rating_provider="Fitch")
ratings_series.name = "rtg_Fitch"
assert_series_equal(act, ratings_series, check_dtype=False)
# --- input: rating score dataframe
exp_lt = conftest.rtg_df_wide
exp_lt = pd.concat(
[
exp_lt,
pd.DataFrame(
data=[[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]],
columns=exp_lt.columns,
),
],
axis=0,
ignore_index=True,
)
exp_lt.columns = [
"rtg_Fitch",
"rtg_Moody",
"rtg_SP",
"rtg_Bloomberg",
"rtg_DBRS",
"rtg_ICE",
]
exp_st = conftest.rtg_df_wide_st
exp_st = pd.concat(
[
exp_st,
pd.DataFrame(
data=[[np.nan, np.nan, np.nan, np.nan]],
columns=exp_st.columns,
),
],
axis=0,
ignore_index=True,
)
exp_st.columns = [
"rtg_Fitch",
"rtg_Moody",
"rtg_SP",
"rtg_DBRS",
]
def test_get_ratings_from_scores_dataframe_with_explicit_rating_provider_longterm():
"""Tests if function can correctly handle pd.DataFrame objects."""
act = rtg.get_ratings_from_scores(
rating_scores=conftest.scores_df_wide_with_err_row,
rating_provider=[
"rtg_Fitch",
"Moody's rating",
"Rating S&P",
"Bloomberg Bloomberg RATING",
"DBRS",
"ICE",
],
tenor="long-term",
)
# noinspection PyTypeChecker
    assert_frame_equal(act, exp_lt)
import numpy as np
import pandas as pd
import pickle
import os
from itertools import product
from cddm_data_simulation import ddm_simulate
from cdwiener import array_fptd
def generate_input_grid(param_grid_size = 100, rt_grid_size=100):
grid_v = np.random.uniform(-1, 1, param_grid_size)
grid_a = np.random.uniform(0, 1.5, param_grid_size)
grid_z = np.random.uniform(0, 1, param_grid_size)
grid_rt = np.random.uniform(0, 5, rt_grid_size)
grid_choice = [-1, 1]
return np.array(list(product(grid_v, grid_a, grid_z, grid_rt, grid_choice)))
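# Minimal usage sketch (hypothetical sizes; the full grid grows as
# param_grid_size**3 * rt_grid_size * 2 rows, so small values are used here):
#     grid = generate_input_grid(param_grid_size=10, rt_grid_size=10)
#     print(grid.shape)  # expected: (10**3 * 10 * 2, 5)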
def generate_random_grid(size, v_bound = [-1, 1], a_bound = [.3, 3], w_bound = [.3, .7], t_params = [.75, 1.5]):
v = np.random.uniform(low = v_bound[0], high = v_bound[1], size = size)
a = np.random.uniform(low = a_bound[0], high = a_bound[1], size = size)
w = np.random.uniform(low = w_bound[0], high = w_bound[1], size = size)
t = np.random.gamma(t_params[0], t_params[1], size=size)
choice = np.random.choice([-1, 1], size)
    data = pd.DataFrame({"v": v, "a": a, "w": w, "rt": t, "choice": choice})
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 8 21:47:05 2017
@author: a002087
"""
import os
import shutil
import sys
import datetime
import codecs
import pandas as pd
import uuid
import re
import pathlib
import time
from core.load import SaveLoadDelete
current_path = os.path.dirname(os.path.realpath(__file__))[:-4]
if current_path not in sys.path:
sys.path.append(current_path)
import core
import core.exceptions as exceptions
"""
Module contains classes related to the structure of a workspace-directory.
WorkSpace is the top class and contains one WorkStep-object representing step_0
and one or several Subset-objects. Each Subset contains several WorkStep-objects
for step_1, step_2....etc.
All calls from outside this module should be made to the WorkSpace instance.
If information in subsets or steps is needed the WorkSpace-class should be
updated to retrieve this information (data should be passt on hierarchically in
the data structure)
Be aware that classes in this module are dependent on the directory and file
structure of a Workspace. Altering the structure of the workspace directory
tree
"""
###############################################################################
class WorkStep(object):
"""
A WorkStep holds information about the file structure in a step-directory
and contains all methodes operating on a specific workstep.
"""
def __init__(self,
name=None,
parent_directory=None,
mapping_objects={},
parent_workspace_object=None,
parent_subset_object=None):
if not all([name, parent_directory]):
return
name = get_step_name(name)
self.paths = {}
self.name = name
self.paths['parent_directory'] = parent_directory
self.paths['step_directory'] = '/'.join([self.paths['parent_directory'], self.name])
self.parent_workspace_object = parent_workspace_object
self.parent_subset_object = parent_subset_object
self.wb_id_header = self.parent_workspace_object.wb_id_header
"""
Input argument mapping_objects is a dictionary since there might be several mapping objects in the future.
We do not want to (?) load mapping objects individually in each sub-object to save memory.
"""
self.mapping_objects = mapping_objects
self._initiate_attributes()
self._set_directories()
self._create_folder_structure()
self.load_all_files()
self._check_folder_structure()
# print('Initiating WorkStep: {}'.format(self.paths['step_directory']))
#==========================================================================
def _create_folder_structure(self):
"""
Sets up the needed folder structure for the workstep.
Folders are added if they dont exist.
"""
if not os.path.exists(self.paths['step_directory']):
os.makedirs(self.paths['step_directory'])
for path in self.paths['directory_paths'].values():
if not os.path.exists(path):
os.makedirs(path)
#==========================================================================
def _create_file_paths(self):
"""
Builds file paths for:
indicator_settings
"""
self.paths['indicator_settings_paths'] = {}
for file_name in os.listdir(self.paths['directory_paths']['indicator_settings']):
if file_name.endswith('.set'):
file_path = '/'.join([self.paths['directory_paths']['indicator_settings'], file_name])
indicator = file_name.split('.')[0]
self.paths['indicator_settings_paths'][indicator] = file_path
#==========================================================================
def _check_folder_structure(self):
#TODO: make check of workspace folder structure
all_ok = True
for key, item in self.paths['directory_paths'].items():
if os.path.isdir(item):
continue
else:
all_ok = False
try:
# MW: Does not work for me in Spyder
                    raise Exception('PathError')
except:
pass
self._logger.debug('no folder set for: {}'.format(item))
return all_ok
#==========================================================================
def _initiate_attributes(self):
"""
Updated 20180720 by <NAME>
Load attributes
"""
self.data_filter = None
self.indicator_settings = {}
self.water_body_filter = None
self.allowed_data_filter_steps = ['step_0', 'step_1']
self.allowed_indicator_settings_steps = ['step_2']
self.allowed_indicator_calculation_steps = ['step_3']
self.result_data = {}
#==========================================================================
def _change_ok(self):
"""
Check to make sure that the default
"""
if self.parent_subset_object and self.parent_workspace_object.alias == 'default_workspace':
self._logger.debug('Not allowed to change default workspace!')
return False
elif self.parent_subset_object and self.parent_subset_object.alias == 'default_subset':
self._logger.debug('Not allowed to change default subset!')
return False
return True
#==========================================================================
def _set_directories(self):
#set paths
self.paths['directory_paths'] = {}
self.paths['directory_paths']['data_filters'] = self.paths['step_directory'] + '/data_filters'
self.paths['directory_paths']['settings'] = self.paths['step_directory'] + '/settings'
self.paths['directory_paths']['indicator_settings'] = self.paths['step_directory'] + '/settings/indicator_settings'
self.paths['directory_paths']['water_body_station_filter'] = self.paths['step_directory'] + '/settings/water_body'
self.paths['directory_paths']['output'] = self.paths['step_directory'] + '/output'
self.paths['directory_paths']['results'] = self.paths['step_directory'] + '/output/results'
#==========================================================================
def add_files_from_workstep(self, step_object=None, overwrite=False):
"""
Copy files from given workstep. Option to overwrite or not.
This method shold generaly be used when copying step_0 or a whole subset.
DONT USE FOR COPYING SINGLE STEPS NUMBERED 1 and up.
"""
for from_file_path in step_object.get_all_file_paths_in_workstep():
to_file_path = from_file_path.replace(step_object.paths['step_directory'], self.paths['step_directory'])
if os.path.exists(to_file_path) and not overwrite:
continue
to_directory = os.path.dirname(to_file_path)
if not os.path.exists(to_directory):
# If directory has been added in later versions of the ekostat calculator
os.makedirs(to_directory)
# Copy file
shutil.copy(from_file_path, to_file_path)
self.load_all_files()
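    # Usage sketch (hypothetical objects; 'old_subset' is assumed to be a Subset instance
    # and 'new_step_0' a WorkStep of the receiving workspace/subset):
    #     new_step_0.add_files_from_workstep(step_object=old_subset.get_step_object('step_0'),
    #                                        overwrite=False)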
#==========================================================================
def calculate_status(self, indicator_list = None, water_body_list = None):
"""
Created 20180613 by <NAME>
Calls calculate_status for each indicator object and returns the result for each waterbody as dataframes in a dict
"""
if self.name != 'step_3':
return False
if water_body_list == None:
water_body_list = self.parent_workspace_object.get_filtered_data(step='step_2',
subset=self.parent_subset_object.unique_id)[self.wb_id_header].unique()
if not len(water_body_list):
#raise Error?
            self._logger.warning('No waterbodies in filtered data')
return False
#---------------------------------------------------------------------
def concat_df(df, save_df, filename, water_body, indicator_object):
#concatenate results
if type(save_df) is pd.DataFrame:
if water_body in save_df[self.wb_id_header].unique():
save_df.drop(save_df.loc[save_df[self.wb_id_header] == water_body].index, inplace = True)
save_df = pd.concat([save_df, df])
elif os.path.exists(indicator_object.result_directory + filename + '.txt'):
save_df = indicator_object.sld.load_df(file_name = filename)
if water_body in save_df[self.wb_id_header].unique():
save_df.drop(save_df.loc[save_df[self.wb_id_header] == water_body].index, inplace = True)
save_df = pd.concat([save_df, df])
else:
save_df = df
return save_df
#---------------------------------------------------------------------
if indicator_list == None:
indicator_list = self.parent_workspace_object.available_indicators
if indicator_list == None:
indicator_list = self.parent_workspace_object.get_available_indicators(subset = self.parent_subset_object.unique_id, step = 2)
for indicator in indicator_list:
status_by_date = False
status_by_year_pos = False
status_by_year = False
status_by_period = False
indicator_name = self.indicator_objects[indicator].name
print(indicator_name)
t_ind = time.time()
by_date, by_year_pos, by_year, by_period = False, False, False, False
for water_body in dict.fromkeys(water_body_list,True):
#print(water_body)
# t_wb = time.time()
if water_body not in self.indicator_objects[indicator].water_body_indicator_df.keys():
continue
#TODO: Here is the main calculate status call.
# - I have thought about to put the loop over waterbodies in the IndicatorBase class.
# - the result in by_date is really a combination of the dataframe with original data and "results" for each date.
# It might be better to get the data part in a dataframe separately and the add the results later. I would like to discuss this.
# - for the user (web interface at least) it is only the bu_date and by_period results that are relevant.
# The other steps are good if I can access when developing with the calculation code but should not be needed for the regular user
by_date, by_year_pos, by_year, by_period = self.indicator_objects[indicator].calculate_status(water_body = water_body)
# time_wb = time.time() - t_wb
# print('-'*50)
# print('Total time to calculate status for water body {}:'.format(water_body), time_wb)
# print('-'*50)
if type(by_date) is not bool:
status_by_date = concat_df(by_date, status_by_date, 'indicator_' + indicator_name + '-by_date',
water_body, self.indicator_objects[indicator])
if type(by_year_pos) is not bool:
status_by_year_pos = concat_df(by_year_pos, status_by_year_pos, indicator_name + '-by_year_pos',
water_body, self.indicator_objects[indicator])
if type(by_year) is not bool:
status_by_year = concat_df(by_year, status_by_year, 'indicator_' + indicator_name + '-by_year',
water_body, self.indicator_objects[indicator])
if type(by_period) is not bool:
status_by_period = concat_df(by_period, status_by_period, 'indicator_' + indicator_name + '-by_period',
water_body, self.indicator_objects[indicator])
time_ind = time.time() - t_ind
print('-'*50)
print('Total time to calculate status for indicator {}:'.format(indicator), time_ind)
print('-'*50)
if type(status_by_date) is not bool:
status_by_date['new_index'] = [str(ix) + '_' + wb for ix, wb in zip(status_by_date.index,
status_by_date[self.wb_id_header])]
            status_by_date.set_index(keys='new_index', inplace=True)
self.indicator_objects[indicator].sld.save_df(status_by_date, file_name = indicator_name + '-by_date',
force_save_txt=True)
if type(status_by_year_pos) is not bool:
status_by_year_pos['new_index'] = [str(ix) + '_' + wb for ix, wb in zip(status_by_year_pos.index,
status_by_year_pos[self.wb_id_header])]
            status_by_year_pos.set_index(keys='new_index', inplace=True)
self.indicator_objects[indicator].sld.save_df(status_by_year_pos, file_name = indicator_name + '-by_year_pos',
force_save_txt=True)
if type(status_by_year) is not bool:
status_by_year['new_index'] = [str(ix) + '_' + wb for ix, wb in zip(status_by_year.index,
status_by_year[self.wb_id_header])]
            status_by_year.set_index(keys='new_index', inplace=True)
self.indicator_objects[indicator].sld.save_df(status_by_year, file_name = indicator_name + '-by_year',
force_save_txt=True)
if type(status_by_period) is not bool:
status_by_period['new_index'] = [str(ix) + '_' + wb for ix, wb in zip(status_by_period.index,
status_by_period[self.wb_id_header])]
            status_by_period.set_index(keys='new_index', inplace=True)
self.indicator_objects[indicator].sld.save_df(status_by_period, file_name = indicator_name + '-by_period',
force_save_txt=True)
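    # Usage sketch (step_3 object only; the indicator and water body values are
    # illustrative assumptions, e.g. 'din_winter' as mentioned elsewhere in this module,
    # and water bodies are identified by their MS_CD code):
    #     step_3_object.calculate_status(indicator_list=['din_winter'],
    #                                    water_body_list=['<water body MS_CD>'])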
#==========================================================================
def calculate_quality_element(self, quality_element=None):
class_name = self.parent_workspace_object.mapping_objects['quality_element'].indicator_config.loc['qe_'+quality_element.lower()]['indicator_class']
print(class_name)
if not hasattr(self, 'quality_element'):
self.quality_element = {}
#print(class_name)
try:
class_ = getattr(core, class_name)
except AttributeError as e:
raise AttributeError('{}\nClass does not exist'.format(e))
#print(class_)
#instance = class_()
# add indicator objects to dictionary
self.quality_element[quality_element] = class_(subset_uuid=self.parent_subset_object.unique_id,
parent_workspace_object = self.parent_workspace_object,
quality_element = quality_element)
#self.quality_element[quality_element].calculate_quality_factor()
self.quality_element[quality_element].calculate_quality_factor()
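    # Usage sketch (the quality element name is an assumption; it must match a
    # 'qe_<name>' row in the quality_element indicator_config):
    #     step_3_object.calculate_quality_element(quality_element='Nutrients')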
#==========================================================================
def get_all_file_paths_in_workstep(self):
"""
Returns a sorted list of all file paths in the workstep tree.
Generally this method is used when copying the workstep.
"""
file_list = []
for root, dirs, files in os.walk(self.paths['step_directory']):
for f in files:
file_list.append('/'.join([root, f]).replace('\\', '/'))
return sorted(file_list)
#==========================================================================
def get_data_filter_object(self):
if self.name not in self.allowed_data_filter_steps:
return False
return self.data_filter
#==========================================================================
def get_data_filter_info(self):
"""
Returns a dict with data filter names as keys.
Every key contains a list of the active filters.
"""
return self.data_filter.get_filter_info()
#==========================================================================
def get_water_body_filter_object(self):
return self.water_body_filter
# ==========================================================================
def get_water_body_station_filter_object(self):
return self.water_body_station_filter
#==========================================================================
def get_indicator_data_filter_settings(self, indicator):
"""
Returns the data filter settings for the given indicator.
"""
if self.name not in self.allowed_indicator_settings_steps:
return False
return self.indicator_data_filter_settings.get(indicator, False)
#==========================================================================
def get_indicator_tolerance_settings(self, indicator):
"""
Returns the tolerance settings for the given indicator.
"""
if self.name not in self.allowed_indicator_settings_steps:
return False
return self.indicator_tolerance_settings.get(indicator, False)
#==========================================================================
def get_indicator_ref_settings(self, indicator):
"""
Returns the reference settings for the given indicator.
"""
if self.name not in self.allowed_indicator_settings_steps:
return False
return self.indicator_ref_settings.get(indicator, False)
# #==========================================================================
# def get_water_body_station_filter(self):
# return self.water_body_station_filter
#==========================================================================
def get_indicator_settings_name_list(self):
return sorted(self.indicator_settings.keys())
#==========================================================================
def indicator_setup(self, indicator_list=None):
"""
when step 3 is initiated indicator objects should be instantiated for all indicators selected in step 2 as default
where do we save info on selected indicators? in step_2/datafilters folder?
We can calculate all indicators available but then the indicator selection is useless with regards to saving time for the user.
"""
"""
Created: 20180215 by Lena
Last modified: 20180913 by Magnus
create dict containing indicator objects according to data availability or choice?
        Should be accessed only for step 3.
"""
        subset_unique_id = self.parent_subset_object.unique_id
# TODO: assert step 3
if indicator_list == None:
indicator_list = self.parent_workspace_object.available_indicators
if indicator_list == None:
indicator_list = self.parent_workspace_object.get_available_indicators(subset=subset_unique_id, step=2)
if not hasattr(self, 'indicator_objects'):
self.indicator_objects = {}
indicators = dict.fromkeys(indicator_list)
for indicator in indicators:
t_start = time.time()
class_name = self.parent_workspace_object.mapping_objects['quality_element'].indicator_config.loc[indicator]['indicator_class']
#print(class_name)
try:
class_ = getattr(core, class_name)
except AttributeError as e:
raise exceptions.MissingClassForIndicator(message=indicator)
            # raise AttributeError('{}\nClass does not exist'.format(e))
#print(class_)
#instance = class_()
# add indicator objects to dictionary
self.indicator_objects[indicator] = class_(subset_uuid = subset_unique_id,
parent_workspace_object = self.parent_workspace_object,
indicator = indicator)
time_ind = time.time() - t_start
print('-'*50)
print('Total time to set up indicator object indicator {}:'.format(indicator), time_ind)
print('-'*50)
# self.indicator_objects[indicator] = core.IndicatorBase(subset = subset_unique_id,
# parent_workspace_object = self.parent_workspace_object,
# indicator = indicator)
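    # Usage sketch (step_3 object; passing None falls back to the indicators
    # available in step 2, as described in the docstring above):
    #     step_3_object.indicator_setup(indicator_list=None)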
#==========================================================================
def load_all_files(self):
self._create_file_paths()
self.load_data_filter()
self.load_indicator_settings_filters()
self.load_water_body_station_filter()
#==========================================================================
def load_data_filter(self):
"""
Load all settings files in the current WorkSpace filter folder...
"""
# print('STEP = ', self.name)
self.data_filter = core.DataFilter(self.paths['directory_paths']['data_filters'],
mapping_objects=self.mapping_objects)
#==========================================================================
def load_water_body_filter_object(self):
"""
Load filter object for waterbodies
"""
self.water_body_filter = core.WaterBodyFilter()
# ==========================================================================
def load_water_body_station_filter(self):
# print('load_water_body_station_filter')
self.water_body_station_filter = core.WaterBodyStationFilter(
water_body_settings_directory=self.paths['directory_paths']['water_body_station_filter'],
mapping_objects=self.mapping_objects)
#==========================================================================
def load_indicator_settings_filters(self):
"""
Updated: 20180419 by <NAME>
Loads all types of settings, data and config files/objects.
"""
allowed_steps = ['step_2']
if self.name not in allowed_steps:
return False
# All indicators in directory should be loaded automatically
# Load indicator setting files. Internal attr (_) since they are only used by other objects.
self._indicator_setting_files = {}
for indicator, file_path in self.paths['indicator_settings_paths'].items():
self._indicator_setting_files[indicator] = core.SettingsFile(file_path, mapping_objects=self.mapping_objects)
if self._indicator_setting_files[indicator].indicator != indicator:
                self._logger.debug('Mismatch in indicator name and object name! {}:{}'.format(self._indicator_setting_files[indicator].indicator, indicator))
                raise IndicatorFileError('Error in indicator settings file',
                                         'Mismatch in indicator name and object name! {}:{}'.format(self._indicator_setting_files[indicator].indicator, indicator))
# Load Filter settings. Filter settings are using indicator_setting_files-objects as data
self.indicator_data_filter_settings = {}
for indicator, obj in self._indicator_setting_files.items():
self.indicator_data_filter_settings[indicator.lower()] = core.SettingsDataFilter(obj)
# Load Ref settings. Filter settings are using indicator_setting_files-objects as data
# TODO: this is doen twice why?
self.indicator_ref_settings = {}
for indicator, obj in self._indicator_setting_files.items():
self.indicator_ref_settings[indicator.lower()] = core.SettingsRef(obj)
# Load Tolerance settings. Filter settings are using indicator_setting_files-objects as data
self.indicator_tolerance_settings = {}
for indicator, obj in self._indicator_setting_files.items():
self.indicator_tolerance_settings[indicator.lower()] = core.SettingsTolerance(obj)
#==========================================================================
def get_results(self, force_loading_txt=False, **kwargs):
"""
Created 20180720 by Magnus
Updated 20180918 by Magnus
Loads all files in the results-directory.
pkl-files are loaded by default if present.
Override this by setting force_loading_txt == True
Data is returned in a dictionary
"""
#self.result_data = {}
result_data = {}
results_directory = self.paths.get('directory_paths', {}).get('results', None)
if results_directory == None:
raise exceptions.MissingPath
if not os.path.exists(results_directory):
raise exceptions.MissingDirectory
file_list = os.listdir(results_directory)
key_list = list(set([item.split('.')[0] for item in file_list]))
save_load_object = core.SaveLoadDelete(results_directory)
for key in key_list:
if kwargs.get('match_string') and kwargs.get('match_string') not in key:
continue
if kwargs.get('by'):
if 'by_' + kwargs.get('by') not in key:
continue
df = save_load_object.load_df(key, load_txt=force_loading_txt)
result_data[key] = df
if not result_data:
            raise exceptions.NoResultsInResultDirectory
return result_data
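    # Usage sketch (keyword values are illustrative; 'by_period' and 'year' match the
    # result file names written by calculate_status above):
    #     results = step_3_object.get_results(match_string='by_period')
    #     results = step_3_object.get_results(by='year', force_loading_txt=True)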
#==========================================================================
def set_indicator_settings_data_filter(self, indicator=None, filter_settings=None):
"""
filter_settings are dicts like:
filter_settings[type_area][variable] = value
"""
if not self._change_ok():
return
if filter_settings:
filter_object = self.indicator_data_filter_settings[indicator]
filter_object.set_values(filter_settings)
#==========================================================================
def deprecated_save_indicator_settings(self, indicator):
if not self._change_ok():
return
self.indicator_settings[indicator].save_file() # Overwrites existing file if no file_path is given
return True
#==========================================================================
def deprecated_save_all_indicator_settings(self):
if not self._change_ok():
return
all_ok = True
for obj in self.indicator_settings.values():
if not obj.save_file() :
all_ok = False
return all_ok
#==========================================================================
def rename_paths(self, from_name, to_name, pre_directory=''):
"""
Replaces string in all file paths.
"""
for name in self.paths.keys():
if type(self.paths[name]) == dict:
for sub_name in self.paths[name].keys():
self.paths[name][sub_name] = get_new_path(from_name, to_name, self.paths[name][sub_name], pre_directory)
else:
self.paths[name] = get_new_path(from_name, to_name, self.paths[name], pre_directory)
# Rename paths in settings files
for indicator in self._indicator_setting_files.keys():
self._indicator_setting_files[indicator].change_path(self.paths['indicator_settings_paths'][indicator])
# Rename path in water_body_station_filter
self.water_body_station_filter.change_path(self.paths['directory_paths']['water_body_station_filter'])
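    # Usage sketch (names are placeholders; pre_directory follows the same convention
    # as in the rename_workspace/rename_subset methods, i.e. 'workspaces' or 'subsets'):
    #     step_object.rename_paths('old_workspace_uuid', 'new_workspace_uuid',
    #                              pre_directory='workspaces')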
#==========================================================================
def print_all_paths(self):
"""
Prints all path in the step.
"""
sep_length = 100
self._logger.debug('='*sep_length)
self._logger.debug('='*sep_length)
self._logger.debug('{} paths'.format(self.name))
self._logger.debug('-'*sep_length)
for item in sorted(self.paths.keys()):
if type(self.paths[item]) == dict:
for path in sorted(self.paths[item].keys()):
if type(self.paths[item][path]) == dict:
for p in sorted(self.paths[item][path].keys()):
self._logger.debug('-', self.paths[item][path][p])
else:
self._logger.debug(self.paths[item][path])
else:
self._logger.debug(self.paths[item])
self._logger.debug('')
#==========================================================================
def set_data_filter(self, filter_type='', filter_name='', data=None, save_filter=True, append_items=False):
"""
Sets the data_filter. See core.filters.data_filter.set_filter for more information.
"""
if not self._change_ok():
return
data_filter = self.get_data_filter_object()
data_filter.set_filter(filter_type=filter_type,
filter_name=filter_name,
data=data,
save_filter=save_filter,
append_items=append_items)
return True
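    # Usage sketch (the filter_type/filter_name values are assumptions; valid values
    # are defined by core.filters.data_filter.set_filter, referenced in the docstring):
    #     step_object.set_data_filter(filter_type='include_list', filter_name='MYEAR',
    #                                 data=['2015', '2016'], save_filter=True)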
#==========================================================================
def set_water_body_station_filter(self, water_body=None, include=True, station_list=None):
if include:
self.water_body_station_filter.include_stations_in_water_body(station_list=station_list, water_body=water_body)
else:
self.water_body_station_filter.exclude_stations_in_water_body(station_list=station_list, water_body=water_body)
#==========================================================================
def show_settings(self):
self._logger.debug('first_filter:')
self.data_filter.show_filter()
###############################################################################
class Subset(object):
"""
Class to hold subset paths and objects.
"""
def __init__(self,
alias=None,
unique_id=None,
parent_directory=None,
mapping_objects={},
parent_workspace_object=None):
if not all([alias, unique_id, parent_directory]):
raise exceptions.MissingInputVariable
self.alias = alias
self.unique_id = unique_id
self.paths = {}
self.paths['parent_directory'] = parent_directory.replace('\\', '/')
self.paths['subset_directory'] = '/'.join([self.paths['parent_directory'], self.unique_id])
self.parent_workspace_object = parent_workspace_object
self.paths['directory_path_log'] = self.parent_workspace_object.paths['directory_path_log']
self.wb_id_header = self.parent_workspace_object.wb_id_header
self.mapping_objects = mapping_objects
self._initiate_attributes()
self._load_steps()
# Add logger
if self.unique_id:
self._set_logger(self.unique_id)
self._set_loggers_to_steps()
#==========================================================================
def _initiate_attributes(self):
self.nr_steps = 5
self.steps = {}
self.available_indicators = []
#==========================================================================
def _set_logger(self, log_id):
# Add logger
core.add_log(log_id=log_id,
log_directory=self.paths['directory_path_log'],
log_level='DEBUG',
on_screen=True,
prefix='subset')
self._logger = core.get_log(log_id)
# self._logger.debug('Logger set for subset {} with unique_id {}'.format(self. name, log_id))
#==========================================================================
def _set_loggers_to_steps(self):
for step in self.steps.keys():
self.steps[step]._logger = self._logger
#==========================================================================
def _change_ok(self):
"""
Check to make sure that the default
"""
if self.parent_subset_object and self.parent_workspace_object.unique_id == 'default_workspace':
self._logger.warning('Not allowed to change default workspace!')
return False
elif self.unique_id == 'default_subset':
self._logger.warning('Not allowed to change default subset!')
return False
return True
#==========================================================================
def deprecated__load_subset_config(self):
self.config_object = Config(self.paths['subset_directory'] + '/subset.cfg')
#==========================================================================
def _load_steps(self):
if not os.path.exists(self.paths['subset_directory']):
os.makedirs(self.paths['subset_directory'])
step_list = [item for item in os.listdir(self.paths['subset_directory']) if '.' not in item]
for step in step_list:
self._load_workstep(step)
#==========================================================================
def deprecated__add_files_from_subset(self, subset_object=None, overwrite=False):
"""
Copy files from given subset. Option to overwrite or not.
This method is used to copy (branching) an entire subset.
"""
for step in subset_object.get_step_list():
self._load_workstep(step)
step_object = subset_object.get_step_object(step)
self.steps[step].add_files_from_workstep(step_object=step_object,
overwrite=overwrite)
# Copy config file
# This is done in Workspace since new uuid needs to be given
# if os.path.exists(subset_object.config_file_path):
# if os.path.exists(self.config_object_file_path) and not overwrite:
# return False
#
# shutil.copy(subset_object.config_file_path, self.config_object_file_path)
# self._load_config()
return True
#==========================================================================
def _load_workstep(self, step=None):
step = get_step_name(step)
if not step:
return False
self.steps[step] = WorkStep(name=str(step),
parent_directory=self.paths['subset_directory'],
mapping_objects=self.mapping_objects,
parent_workspace_object=self.parent_workspace_object,
parent_subset_object=self)
return True
#==========================================================================
def deprecated_delete_workstep(self, step=None):
"""
step is like 'step_1', 'step_2' and so on.
"""
if step in self.subset_dict.keys():
# TODO: Delete files and directories. How to make this safe?
self.steps.pop(step)
#==========================================================================
def deprecated_get_alias(self):
alias = self.config_object.get_config('alias')
if not alias:
return ''
#==========================================================================
def deprecated__set_unique_id(self):
"""
Sets a unique id (UUID) to the subset. Will not overwrite an existing one.
"""
self.unique_id = self.config_object.set_unique_id()
#==========================================================================
def get_all_file_paths_in_subset(self):
"""
Returns a sorted list of all file paths in the subset tree.
"""
file_list = []
for root, dirs, files in os.walk(self.paths['subset_directory']):
for f in files:
file_list.append('/'.join([root, f]).replace('\\', '/'))
return sorted(file_list)
#==========================================================================
def get_data_filter_info(self, step):
"""
Returns a dict with information about the active filters.
"""
data_filter = self.get_data_filter_object(step)
if not data_filter:
return False
return data_filter.get_data_filter_info()
#==========================================================================
def get_data_filter_object(self, step):
"""
Returns the data filter for the given step.
"""
step = get_step_name(step)
if step not in self.get_step_list():
return False
return self.steps[step].data_filter
#==========================================================================
def get_step_list(self):
return sorted(self.steps.keys())
#==========================================================================
def get_step_object(self, step):
step = get_step_name(step)
return self.steps.get(step, False)
#==========================================================================
def load_data(self, step):
if step not in self.steps.keys():
            self._logger.debug('Invalid step "{}" given to load data in subset "{}"!'.format(step, self.alias))
return False
self.steps[step].load_data()
#==========================================================================
def deprecated_get_step_1_object(self):
return self.get_step_object('step_1')
#==========================================================================
def deprecated_get_step_2_object(self):
return self.get_step_object('step_2')
#==========================================================================
def deprecated_set_alias(self, alias):
self._logger.debug('New alias for subset "{}" => "{}"'.format(self.config_object.get_config('alias'), alias))
self.config_object.set_config('alias', alias)
#==========================================================================
def deprecated_rename_paths(self, from_name, to_name, pre_directory=''):
for name in self.paths.keys():
if type(self.paths[name]) == dict:
for sub_name in self.paths[name].keys():
self.paths[name][sub_name] = get_new_path(from_name, to_name, self.paths[name][sub_name], pre_directory)
else:
self.paths[name] = get_new_path(from_name, to_name, self.paths[name], pre_directory)
#==========================================================================
def deprecated_rename_subset(self, new_name):
if new_name.lower() in ['subset', 'default_subset']:
self._logger.debug('Invalid name "{}" for subset!'.format(new_name))
return False
current_directory = self.paths['subset_directory']
new_directory = '{}/{}'.format(self.paths['parent_directory'], new_name)
# Rename all paths in subset
self.rename_paths(self.name, new_name, pre_directory='subsets')
# Rename paths in steps
for step in self.steps.keys():
self.steps[step].rename_paths(self.name, new_name, pre_directory='subsets')
# Rename directoy
os.rename(current_directory, new_directory)
# Set path in config file
self.config_object.set_path(self.name, new_name, pre_directory='subsets')
self.name = new_name
return True
#==========================================================================
def deprecated_rename_workspace(self, from_name, to_name):
# Rename all paths in subset
self.rename_paths(from_name, to_name, pre_directory='workspaces')
# Rename paths in steps
for step in self.steps.keys():
self.steps[step].rename_paths(from_name, to_name, pre_directory='workspaces')
# Set path in config file
self.config_object.set_path(from_name, to_name, pre_directory='workspaces')
return True
#==========================================================================
def print_all_paths(self):
"""
Prints all path in the subset.
"""
sep_length = 100
self._logger.debug('='*sep_length)
self._logger.debug('='*sep_length)
        self._logger.debug('Subset {} paths'.format(self.alias))
self._logger.debug('-'*sep_length)
for item in sorted(self.paths.keys()):
if type(self.paths[item]) == dict:
for path in sorted(self.paths[item].keys()):
if type(self.paths[item][path]) == dict:
for p in sorted(self.paths[item][path].keys()):
self._logger.debug('-', self.paths[item][path][p])
else:
self._logger.debug(self.paths[item][path])
else:
self._logger.debug(self.paths[item])
for step in sorted(self.steps.keys()):
self.steps[step].print_all_paths()
self._logger.debug('')
#==========================================================================
def set_data_filter(self, step='', filter_type='', filter_name='', data=None, save_filter=True, append_items=False):
step_object = self.get_step_object(step)
if not step_object:
return False
return step_object.set_data_filter(filter_type=filter_type,
filter_name=filter_name,
data=data,
save_filter=save_filter,
append_items=append_items)
###############################################################################
class WorkSpace(object):
"""
Created ???????? by <NAME>
Updated 20180601 by <NAME>
Class to hold and alter a workspace.
name is UUID.
"""
def __init__(self,
alias=None,
unique_id=None,
parent_directory=None,
resource_directory=None,
nr_subsets_allowed=4,
mapping_objects=None,
user_id=None):
if not all([alias, unique_id, parent_directory, user_id]):
raise exceptions.MissingInputVariable
if not nr_subsets_allowed:
raise exceptions.MissingInputVariable
# Initiate paths
self.paths = {}
self.alias = alias
self.unique_id = unique_id
self.user_id = user_id
self.mapping_objects = mapping_objects
self.wb_id_header = 'MS_CD'
self.all_status = ['editable', 'readable', 'deleted']
self.paths['parent_directory'] = parent_directory.replace('\\', '/')
self.paths['resource_directory'] = resource_directory.replace('\\', '/')
self.nr_subsets_allowed = nr_subsets_allowed
self._initiate_attributes()
# Load UUID mapping file for subsets
# 20180601 MW added user_id as argument
self.uuid_mapping = core.UUIDmapping('{}/uuid_mapping.txt'.format(self.paths['directory_path_subset']), user_id=self.user_id)
self._setup_workspace()
# Add logger
if self.unique_id:
self._set_logger(self.unique_id)
self._set_loggers_to_steps()
#self.deprecated_load_config_files()
#==========================================================================
def _add_subset(self, unique_id=None):
assert unique_id, 'No subset name given!'
if unique_id == 'default_subset':
alias = 'default_subset'
else:
alias = self.uuid_mapping.get_alias(unique_id)
if unique_id in self.subset_dict.keys():
self._logger.debug('Given subset "{}" with alias "{}" is already present!'.format(unique_id, alias))
return False
self.paths['directory_path_subsets'][unique_id] = self.paths['directory_path_subset'] + '/{}'.format(unique_id)
self.subset_dict[unique_id] = Subset(alias=alias,
unique_id=unique_id,
parent_directory=self.paths['directory_path_subset'],
mapping_objects=self.mapping_objects,
parent_workspace_object=self)
return unique_id
#==========================================================================
def _change_ok(self):
"""
Check to make sure that default workspace is not changed.
"""
if self.unique_id == 'default_workspace':
self._logger.debug('Not allowed to change default workspace!')
return False
return True
#==========================================================================
def _initiate_attributes(self):
# Setup default paths
self.paths['mapping_directory'] = '/'.join([self.paths['resource_directory'], 'mappings'])
self.paths['workspace_directory'] = '/'.join([self.paths['parent_directory'], self.unique_id])
self.paths['directory_path_subsets'] = {}
self.paths['directory_path_input_data'] = self.paths['workspace_directory'] + '/input_data'
self.paths['directory_path_raw_data'] = self.paths['directory_path_input_data'] + '/raw_data'
self.paths['directory_path_export_data'] = self.paths['directory_path_input_data'] + '/exports'
self.paths['directory_path_subset'] = self.paths['workspace_directory'] + '/subsets'
self.paths['directory_path_log'] = self.paths['workspace_directory'] + '/log'
self.paths['directory_path_cache'] = self.paths['workspace_directory'] + '/cache'
# Create directories if not present
for key, p in self.paths.items():
if type(p) == str:
if not os.path.exists(p):
os.makedirs(p)
# Step
self.step_0 = None
# Subset
self.subset_dict = {}
# self.dtype_settings = core.RawDataFiles(self.paths['directory_path_raw_data'])
self.datatype_settings = core.DataTypeMapping(self.paths['directory_path_input_data'])
#==========================================================================
def deprecated_load_config_files(self):
self.cf_df = pd.read_csv(self.paths['resource_directory'] + '/Quality_Elements.cfg', sep='\t', dtype='str', encoding='cp1252')
assert all(['quality element' in self.cf_df.keys(), 'indicator' in self.cf_df.keys(), 'parameters' in self.cf_df.keys()]), 'configuration file must contain quality element, indicator and parameters information'
self.cfg = {}
self.cfg['quality elements'] = self.cf_df.groupby('quality element')['indicator'].unique()
self.cfg['indicators'] = self.cf_df.groupby('indicator')['parameters'].unique()
# for QE in self.cfg['quality elements']:
# self.cfg[QE] = self.cf_df.groupby(QE)['indicator'].unique()
# for indicator in self.cfg['indicators']:
# self.cfg[indicator] = self.cf_df.groupby(QE)['parameters'].split(',')
#==========================================================================
def _load_workstep(self, subset=None, step=None):
subset_object = self.get_subset_object(subset)
if not subset_object:
return False
return subset_object._load_workstep(step)
#==========================================================================
def _set_logger(self, log_id):
# Add logger
core.add_log(log_id=log_id,
log_directory=self.paths['directory_path_log'],
log_level='DEBUG',
on_screen=True,
prefix='workspace')
self._logger = core.get_log(log_id)
# self._logger.debug('Logger set for workspace {} with unique_id {}'.format(self. name, self.unique_id))
#==========================================================================
def _set_loggers_to_steps(self):
self.step_0._logger = self._logger
for subset in self.subset_dict.keys():
self.subset_dict[subset]._set_loggers_to_steps()
#==========================================================================
def _setup_workspace(self):
"""
Adds paths and objects for the workspace.
"""
# Create input data folder if non existing
# if not os.path.exists(self.paths['directory_path_raw_data']):
# os.makedirs(self.paths['directory_path_raw_data'])
#
# # Create raw data folder if non existing
# if not os.path.exists(self.paths['directory_path_input_data']):
# os.makedirs(self.paths['directory_path_input_data'])
#
# # Initiate one subset as default
# if not os.path.exists(self.paths['directory_path_subset']):
# os.makedirs(self.paths['directory_path_subset'])
subsets = [item for item in os.listdir(self.paths['directory_path_subset']) if '.' not in item]
# self._logger.debug('subsets', subsets)
if subsets:
for s in subsets:
self._add_subset(s)
else:
self._add_subset('default_subset')
# Load config file
# self._load_workspace_config()
# Step 0
self.step_0 = WorkStep(name='step_0',
parent_directory=self.paths['workspace_directory'],
mapping_objects=self.mapping_objects,
parent_workspace_object=self)
# Set data and index handler
self.data_handler = core.DataHandler(input_data_directory=self.paths['directory_path_input_data'],
resource_directory=self.paths['resource_directory'],
mapping_objects=self.mapping_objects,
wb_id_header=self.wb_id_header)
self.index_handler = core.IndexHandler(workspace_object=self,
data_handler_object=self.data_handler)
# Load Cache object
self.cache = core.Cache(self.paths['directory_path_cache'], mandatory_uuid=True, min_nr_arguments=3)
#==========================================================================
def import_file(self, file_path=None, data_type=None, status=0, force=True):
"""
Created ???????? by
Updated 20180721 by <NAME>
Imports a data file to the raw_data directory in the workspace.
Also adds information to the datatype_settings-object.
this method does not load data.
"""
assert all([file_path, data_type]), 'Not enough input arguments to import file'
# Not able to load data into default workspace
if not self._change_ok():
            self._logger.debug('Not allowed to import files into the default workspace.')
return False
if not os.path.exists(file_path):
            self._logger.debug('Given file path does not exist: {}'.format(file_path))
return False
# Copy file
target_file_path = '/'.join([self.paths['directory_path_raw_data'], os.path.basename(file_path)])
if os.path.exists(target_file_path):
if force:
os.remove(target_file_path)
else:
                self._logger.debug('Target file already exists and force=False: {}'.format(target_file_path))
return False
shutil.copyfile(file_path, target_file_path)
# Add file to dtype_settings file
# self.dtype_settings.add_file(file_path=file_path, data_type=data_type)
self.datatype_settings.add_file(file_name=file_path, data_type=data_type, status=status)
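    # Usage sketch (the file path is a placeholder; data_type should be one of the
    # datatypes used in import_default_data, e.g. 'zoobenthos'; status=1 marks the
    # file as active):
    #     workspace.import_file(file_path='/path/to/zoobenthos_export.txt',
    #                           data_type='zoobenthos', status=1)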
#==========================================================================
def import_default_data(self, force=True):
"""
Created ???????? by Lena
Updated 20180424 by <NAME>
Imports default data from the resources directory to input raw_data directory in workspace.
"""
# Not able to load data into default workspace
if not self._change_ok():
return False
source_directory = self.paths['resource_directory'] + '/default_data'
file_name_list = os.listdir(source_directory)
all_datatypes = [u'chlorophyll',
u'physicalchemical',
u'physicalchemicalmodel',
u'phytoplankton',
u'zoobenthos']
# Copy files
for file_name in file_name_list:
datatype = file_name.split('_')[0]
if datatype not in all_datatypes:
continue
source_file_path = '/'.join([source_directory, file_name])
self.import_file(file_path=source_file_path, data_type=datatype, force=force)
self._logger.debug('Default data file has been copied to workspace raw data folder: {}'.format(file_name))
# Have to load and sync to set status and loaded as int. Dont know why...
self.datatype_settings.load_and_check_dtype_settings()
# # Load data
# self.load_all_data()
# self._logger.debug('Data has been loaded in import_default_data. flag "load_data" was set to True')
# Update dtype_settings object
# all_ok = self.dtype_settings.load_and_sync_dtype_settings()
# all_ok = self.dtype_settings.load_and_check_dtype_settings()
# all_ok = self.datatype_settings.load_and_check_dtype_settings()
#
# if not all_ok:
# self._logger.warning('Default data not loaded correctly!')
# return False
return True
#==========================================================================
def set_status_for_file(self, file_list, status):
"""
Created 20180424 by <NAME>
Updated 20180424 by <NAME>
"""
if status:
status = 1
else:
status = 0
self.datatype_settings.set_status(file_list, status)
#==========================================================================
def apply_data_filter(self, step=None, subset=None, apply_all_previous_filters=True):
"""
Created 20180608 by <NAME>
Updated
Applies the data filter on the step given.
If apply_all_previous_filters==True all previous steps are applied before.
"""
if apply_all_previous_filters:
step = get_step_name(step)
step_nr = int(step[-1])
for s in range(step_nr+1):
# print('s', s)
# self._apply_data_filter(step=s, subset=subset)
if s==0:
self._apply_data_filter(step=s) # Cannot handle subset if step=0
else:
self._apply_data_filter(step=s, subset=subset)
else:
self._apply_data_filter(step=step, subset=subset)
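    # Usage sketch (subset_uuid is a placeholder for a subset unique_id; with
    # apply_all_previous_filters=True, steps 0..2 are applied in order):
    #     workspace.apply_data_filter(step=2, subset=subset_uuid)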
#==========================================================================
def _apply_data_filter(self, step=None, subset=None):
"""
Created ???????? by <NAME>
Updated 20180220 by <NAME>
Applies data filter to the index handler.
Input:
step: step that the data filter should be applied on.
data_filter can be applied on step 0, 1 and 2
subset: subset to apply filter on. Must be provided if step is > 0
Output:
True: If all is ok
False: If something faild
"""
step = get_step_name(step)
if step == 'step_0':
filter_object = self.step_0.data_filter
elif int(step[-1]) > 2:
self._logger.debug('No data filter in {}'.format(step))
return False
elif subset not in self.get_subset_list():
            self._logger.debug('Provided subset "{}" not in subset list'.format(subset))
return False
else:
subset_object = self.get_subset_object(subset)
step_object = subset_object.get_step_object(step)
filter_object = step_object.get_data_filter_object()
all_ok = self.index_handler.add_filter(filter_object=filter_object, step=step, subset=subset)
return all_ok
#==========================================================================
def apply_indicator_data_filter(self, subset='', indicator='', step='step_2', water_body_list = False):
"""
Created ???????? by <NAME>
Updated 20180719 by <NAME>
Applies indicator data filter to the index handler. Step 2. Applies filter for all water_bodies.
Input:
subset: subset to apply filter on.
indicator: name of indicator to apply as a string, ex. "din_winter"
step: step_2 is default
Output:
True: If all is ok
False: If something faild
"""
t_tot = time.time()
if not water_body_list:
water_body_list = self.get_filtered_data(step='step_1', subset=subset)[self.wb_id_header].unique()
if not len(water_body_list):
#raise Error?
self._logger.warning('No waterbodies in filtered data')
return False
if subset not in self.get_subset_list():
self._logger.debug('Provided subset "{}" not in subset list'.format(subset))
return False
# Find first year of filtered data from step 1.
kwargs = {}
df = self.get_filtered_data(subset=subset, step=1)
kwargs['remove_data_before_year'] = min(df['MYEAR'])
# Get subset and step objects
step_nr = step
step = get_step_name(step)
subset_object = self.get_subset_object(subset)
# Indicator_settings are linked to step 2 by default
step_object = subset_object.get_step_object(step)
step_object.load_water_body_filter_object()
# Get filter objects
water_body_filter_object = step_object.get_water_body_filter_object()
water_body_station_filter_object = step_object.get_water_body_station_filter_object()
indicator = indicator.lower()
settings_filter_object = step_object.get_indicator_data_filter_settings(indicator)
if step not in self.index_handler.booleans['step_0'][subset]['step_1'].keys():
# If step_2 filter has not been added, add this now.
self.apply_data_filter(step=step_nr, subset=subset)
# self.index_handler.add_filter(filter_object=water_body_filter_object, step=step, subset=subset, **kwargs)
# TODO: should water_body be None above and should this not be moved outside the water_body loop?
# Also it is wrong that the filter object here is water_body_filter_object
#set filters for all indicator in all waterbodies and if no key in boolean dict for waterbody add waterbody filter
for water_body in dict.fromkeys(water_body_list, True):
type_area = self.mapping_objects['water_body'].get_type_area_for_water_body(water_body, include_suffix=True)
if type_area == '':
continue
if water_body not in self.index_handler.booleans['step_0'][subset]['step_1']['step_2'].keys():
# If a filter for the waterbody has not been added, add this now
if bool(water_body_station_filter_object.get_list(water_body=water_body)) | \
bool(water_body_station_filter_object.get_list(include=False, water_body=water_body)):
# station filter for waterbody
self.apply_water_body_station_filter(subset=subset, water_body=water_body, **kwargs)
else:
# no station filter for waterbody
self.apply_water_body_filter(subset=subset, water_body=water_body, **kwargs)
# self.index_handler.add_filter(filter_object=water_body_filter_object, step=step, subset=subset,
# water_body=water_body, **kwargs)
# index_handler should have filters for step and waterbody, now add filter for the indicator from
# the filter object for settings
self.index_handler.add_filter(filter_object=settings_filter_object, step=step, subset=subset,
indicator=indicator, water_body=water_body, **kwargs)
temp_df_1 = self.get_filtered_data(step=2, subset=subset, water_body=water_body)
temp_df_2 = self.get_filtered_data(step=2, subset=subset, indicator=indicator, water_body=water_body)
if len(temp_df_2['WATER_BODY_NAME'].unique()) > 1:
print('waterbodies after station filter and {} filter {}'.format(indicator, temp_df_2['WATER_BODY_NAME'].unique()))
print('statn after filters {}'.format(temp_df_2['STATN'].unique()))
print('-'*50)
# print(temp_df_2['WATER_BODY_NAME'].unique(), temp_df_2['STATN'].unique())
time_total = time.time() - t_tot
print('-'*50)
print('Total time to apply data filters for indicator {}:'.format(indicator), time_total)
print('-'*50)
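    # Usage sketch (indicator name taken from the docstring example above; subset_uuid
    # is a placeholder for a subset unique_id):
    #     workspace.apply_indicator_data_filter(subset=subset_uuid, indicator='din_winter')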
# ==========================================================================
def apply_water_body_filter(self, subset=None, water_body=None, **kwargs):
"""
        Set the boolean for the waterbody key in the boolean dict. Should look in all
        relevant filter objects for the waterbody. Filter objects currently used:
        WaterBodyFilter, WaterBodyStationFilter. The filters are combined with OR to
        get the boolean.
        :param subset:
        :param water_body:
:return: no return
"""
step = 2
if subset not in self.get_subset_list():
            self._logger.debug('Provided subset "{}" not in subset list'.format(subset))
return False
else:
# Get subset and step objects
step = get_step_name(step)
subset_object = self.get_subset_object(subset)
step_object = subset_object.get_step_object(step)
# Get filter objects
water_body_filter_object = step_object.get_water_body_filter_object()
all_ok = self.index_handler.add_filter(filter_object=water_body_filter_object, step=step,
subset=subset, water_body=water_body, **kwargs)
return all_ok
# ==========================================================================
def apply_water_body_station_filter(self, subset=None, water_body=None, **kwargs):
"""
Filter is applied in step 2.
"""
step = 2
if subset not in self.get_subset_list():
self._logger.debug('Provided subset "{}" not in subset list'.format(subset))
return False
else:
step = get_step_name(step)
subset_object = self.get_subset_object(subset)
# Indicator_settings are linked to step 2 by default
step_object = subset_object.get_step_object(step)
filter_object = step_object.get_water_body_station_filter_object()
all_ok = self.index_handler.add_filter(filter_object=filter_object, step=step,
subset=subset, water_body=water_body, **kwargs)
temp_df = self.get_filtered_data(step=2, subset=subset)
temp_df = self.get_filtered_data(step=2, subset=subset, water_body=water_body)
return all_ok
#==========================================================================
def copy_subset(self, source_uuid=None, target_alias=None):
"""
Created 20180219 by <NAME>
Updated 20180601 by <NAME>
Creates a copy of a subset.
"""
        if not source_uuid:
            self._logger.warning('No subset named "{}"'.format(source_uuid))
            raise exceptions.SubsetNotFound
            # return False
        assert target_alias
        # Add UUID for the new subset in uuid_mapping
target_uuid = self.uuid_mapping.add_new_uuid_for_alias(target_alias)
if not target_uuid:
self._logger.debug('Could not add subset with alias "{}". Subset already exists!'.format(target_alias))
raise exceptions.SubsetAlreadyExists
# return False
# Copy all directories and files in subset
source_subset_path = '/'.join([self.paths['directory_path_subset'], source_uuid])
target_subset_path = '/'.join([self.paths['directory_path_subset'], target_uuid])
# print('source_subset_path:', source_subset_path)
# print('target_subset_path:', target_subset_path)
# Copy files
shutil.copytree(source_subset_path, target_subset_path)
# Load subset
self._add_subset(target_uuid)
target_status = self.uuid_mapping.get_status(unique_id=target_uuid) # Check in case default is changed
return {'alias': target_alias,
'subset_uuid': target_uuid, # MW: 20180524
'status': target_status}
#==========================================================================
def request_subset_list(self):
# TODO: update this!
"""
Created 20180219 by <NAME>
Updated 20180219 by <NAME>
Returns a list with dicts with keys:
alias
            uuid
status
Information is taken from uuid_mapping. No data has to be loaded.
"""
return_list = []
for alias in self.uuid_mapping.get_alias_list_for_user(self.user_id, status=self.all_status):
return_list.append({'alias': alias,
'uuid': self.uuid_mapping.get_uuid(alias, self.user_id, self.all_status),
'status': self.uuid_mapping.get_status(alias, self.user_id)})
return return_list
#==========================================================================
def print_all_paths(self):
"""
        Logs all paths in the workspace (at debug level).
"""
sep_length = 100
self._logger.debug('='*sep_length)
self._logger.debug('='*sep_length)
self._logger.debug('Workspace root paths')
self._logger.debug('-'*sep_length)
for item in sorted(self.paths.keys()):
if type(self.paths[item]) == dict:
for path in sorted(self.paths[item].keys()):
if type(self.paths[item][path]) == dict:
for p in sorted(self.paths[item][path].keys()):
                            self._logger.debug('- {}'.format(self.paths[item][path][p]))
else:
self._logger.debug(self.paths[item][path])
else:
self._logger.debug(self.paths[item])
self.step_0.print_all_paths()
for subset in sorted(self.subset_dict.keys()):
self.subset_dict[subset].print_all_paths()
self._logger.debug('')
#==========================================================================
def data_is_loaded(self):
"""
Created 20180720 by <NAME>
Updated
        Returns True if data is loaded, otherwise False.
"""
if len(self.data_handler.all_data):
return True
else:
return False
#==========================================================================
def data_is_available(self):
"""
Created 20180720 by <NAME>
Updated
        Returns True if data is available, otherwise False.
"""
return self.datatype_settings.has_data()
#==========================================================================
def delete_all_export_data(self):
"""
Created 20180423 by <NAME>
Updated 20180423 by <NAME>
Permanently deletes all files in the data export directory.
Also sets column "loaded" in datatype_settings.txt to 0.
"""
for file_name in os.listdir(self.paths['directory_path_export_data']):
file_path = '/'.join([self.paths['directory_path_export_data'], file_name])
os.remove(file_path)
# Reset loaded in datatype_settings
self.datatype_settings.reset_loaded()
self._logger.debug('All files in export directory are deleted and all "loaded" in datatype_settings is 0.')
#==========================================================================
def delete_alldata_export(self):
"""
Created 20180411 by <NAME>
Updated 20180525 by Magnus
Permanently deletes all_data.txt and all_data.pickle.
"""
sld_object = core.SaveLoadDelete(self.paths['directory_path_export_data'])
sld_object.delete_files('all_data')
# sld_object.delete_files('all_data_raw')
# try:
# os.remove(self.paths['directory_path_input_data'] + '/exports/all_data.txt')
# self._logger.debug('all_data.txt deleted')
# except OSError:
# pass
# try:
# os.remove(self.paths['directory_path_input_data'] + '/exports/all_data.pickle')
# self._logger.debug('all_data.pickle deleted')
# except OSError:
# pass
#==========================================================================
def delete_datatype_export(self, datatype):
"""
Created: 20180422 by <NAME>
Last modified: 20180422 by <NAME>
Permanently deletes the raw_format and column_format data files for the given datatype.
"""
try:
file_name = 'column_format_{}_data.txt'.format(datatype)
os.remove('{}/{}'.format(self.paths['directory_path_export_data'], file_name))
self._logger.debug('{} deleted'.format(file_name))
except OSError:
pass
try:
file_name = 'column_format_{}_data.pickle'.format(datatype)
os.remove('{}/{}'.format(self.paths['directory_path_export_data'], file_name))
self._logger.debug('{} deleted'.format(file_name))
except OSError:
pass
try:
file_name = 'raw_format_{}_data.txt'.format(datatype)
os.remove('{}/{}'.format(self.paths['directory_path_export_data'], file_name))
self._logger.debug('{} deleted'.format(file_name))
except OSError:
pass
#==========================================================================
def delete_subset(self, alias=None, unique_id=None, permanently=False):
"""
Created 20180219 by <NAME>
Updated 20180219 by <NAME>
        Deletes the given subset. If permanently=True the subset directory is removed
        from disk, otherwise the subset is only marked as deleted in uuid_mapping.
"""
if alias:
unique_id = self.uuid_mapping.get_uuid(alias=alias, user_id=self.user_id)
else:
alias = self.uuid_mapping.get_alias(unique_id=unique_id)
# if unique_id not in self.subset_dict.keys():
# self._logger.warning('Subset "{}" with alias "{}" does not exist!'.format(unique_id, alias))
# return False
if permanently:
path_to_remove = self.paths['directory_path_subsets'].get(unique_id)
            if not ('workspace' in path_to_remove and 'subset' in path_to_remove):
self._logger.error('Trying to remove subset but the path to delete is not secure!')
return False
self._logger.warning('Permanently deleting subset "{}" with alias "{}".'.format(unique_id, alias))
# Delete files and folders:
shutil.rmtree(path_to_remove)
# Remove objects and links
if unique_id in self.subset_dict.keys():
self.subset_dict.pop(unique_id)
self.paths['directory_path_subsets'].pop(unique_id)
# Remove in uuid_mapping
self.uuid_mapping.permanent_delete_uuid(unique_id)
else:
self._logger.warning('Removing subset "{}" with alias "{}".'.format(unique_id, alias))
self.uuid_mapping.set_status(unique_id, 'deleted')
self.uuid_mapping.set_inactive(unique_id)
return True
#==========================================================================
def get_all_file_paths_in_workspace(self):
"""
Returns a sorted list of all file paths in the workspace tree.
"""
file_list = []
for root, dirs, files in os.walk(self.paths['workspace_directory']):
for f in files:
file_list.append('/'.join([root, f]).replace('\\', '/'))
return sorted(file_list)
#==========================================================================
def get_all_file_paths_in_input_data(self):
file_list = []
for root, dirs, files in os.walk(self.paths['directory_path_input_data']):
for f in files:
file_list.append('/'.join([root, f]).replace('\\', '/'))
return sorted(file_list)
#==========================================================================
def get_alias_for_unique_id(self, unique_id):
return self.uuid_mapping.get_alias(unique_id=unique_id)
#==========================================================================
def get_unique_id_for_alias(self, alias):
return self.uuid_mapping.get_uuid(alias)
#==========================================================================
def get_data_filter_object(self, step=None, subset=None):
step_object = self.get_step_object(step=step, subset=subset)
if not step_object:
return False
return step_object.get_data_filter_object()
#==========================================================================
def get_water_body_station_filter_object(self, subset=None):
step = 2
step_object = self.get_step_object(step=step, subset=subset)
if not step_object:
return False
return step_object.get_water_body_station_filter()
#==========================================================================
def get_data_filter_info(self, step=None, subset=None):
data_filter = self.get_data_filter_object(step=step, subset=subset)
if not data_filter:
return False
return data_filter.get_filter_info()
    #==========================================================================
    def get_data_for_waterstool(self, step=None, subset=None):
if step != 3:
return False
step_object = self.get_step_object(step = step, subset = subset)
result_directory = step_object.paths['step_directory']+'/output/results/'
indicator_filelist = [f for f in os.listdir(result_directory) if '-by_date.pkl' in f]
sld = core.SaveLoadDelete(result_directory)
indicator_dict = {}
datatype_dict = {'physchem': [], 'phytoplankton': [], 'chlorophyll': [], 'zoobenthos': []}
# TODO put this in config file
indicator2datatype = {'ntot_winter': 'physchem', 'ptot_winter': 'physchem',
'din_winter': 'physchem', 'dip_winter': 'physchem',
'ntot_summer': 'physchem', 'ptot_summer': 'physchem',
'secchi': 'physchem', 'oxygen': 'physchem',
'biov': 'phytoplankton', 'chl': 'chlorophyll',
'bqi': 'zoobenthos'}
for indicator in indicator_filelist:
# if not os.path.exists(sld.directory + indicator + '-by_date.pkl') or not os.path.exists(sld.directory +indicator + '-by_date.txt'):
# pass #self.indicator_dict[indicator] = False
# else:
datatype = indicator2datatype[indicator.split('-')[0][10:]]
datatype_dict[datatype].append(sld.load_df(file_name = indicator))
indicator_dict[indicator] = sld.load_df(file_name = indicator) # + '-by_date'
column_mapping = pd.read_csv(self.paths['resource_directory'] + '/mappings/waters_column_mapping.txt',
sep='\t', encoding='cp1252')
c = {key: value[0] for key, value in column_mapping.to_dict('list').items()}
remove_cols = ['REFERENCE_VALUE', 'HG_VALUE_LIMIT', 'GM_VALUE_LIMIT', 'MP_VALUE_LIMIT', 'PB_VALUE_LIMIT',
'global_EQR', 'local_EQR']
for key, item in datatype_dict.items():
df_list = item
df = pd.concat(df_list)
df = df.rename(columns=c)
col_list = df.columns
col_list = [c for c in col_list if c not in remove_cols]
sld.save_df(df[col_list], 'WATERS_export_'+key, force_save_txt=True)
df_list = list(indicator_dict.values())
all_df = pd.concat(df_list)
column_mapping = | pd.read_csv(self.paths['resource_directory'] + '/mappings/waters_column_mapping.txt', sep='\t', encoding='cp1252') | pandas.read_csv |
'''
Tests for Naive benchmark classes
Tests currently cover:
1. Forecast horizons
2. Allowable input types: np.ndarray, pd.DataFrame, pd.Series
3. Failure paths for abnormal input such as np.nan, non numeric,
empty arrays and np.Inf
4. Predictions
- naive1 - carries forward last value
- snaive - carries forward previous h values
- average - flat forecast of average
- drift - previous value + gradient
- ensemble naive - the average of all of the methods
- Test fit_predict()
5. Prediction intervals
- horizon
- sets i.e. 2 sets of intervals (0.8 and 0.95)
- width
- bootstrapped prediction intervals
- length of horizon
- number of sets of intervals returned.
6. Fitted values
- expected length
- count of NaN
'''
import pytest
import pandas as pd
import numpy as np
import forecast_tools.baseline as b
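# Illustrative usage of the baseline API exercised by these tests (a sketch
# inferred from the calls below, not authoritative documentation; y_train is a
# hypothetical training series):
#
#     model = b.Naive1()
#     model.fit(pd.Series([10, 20, 30]))
#     preds = model.predict(12)                        # 12 point forecasts
#     preds = b.SNaive(12).fit_predict(y_train, 12)    # seasonal naive, period 12
#     preds, intervals = model.predict(12, return_predict_int=True,
#                                      alpha=[0.2, 0.05])  # 2 sets of intervals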
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_naive1_forecast_horizon(data, horizon, expected):
'''
    test Naive1 returns a forecast of length equal to the horizon.
'''
model = b.Naive1()
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_naive1_fit_predict(data, horizon, expected):
'''
    test Naive1 fit_predict returns a forecast of length equal to the horizon.
'''
model = b.Naive1()
# fit_predict for point forecasts only
preds = model.fit_predict(pd.Series(data), horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_snaive_forecast_horizon(data, horizon, expected):
'''
    test SNaive returns a forecast of length equal to the horizon.
'''
model = b.SNaive(1)
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_snaive_fit_predict(data, horizon, expected):
'''
    test SNaive fit_predict returns a forecast of length equal to the horizon.
'''
model = b.SNaive(1)
# fit_predict for point forecasts only
preds = model.fit_predict(pd.Series(data), horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_drift_forecast_horizon(data, horizon, expected):
'''
    test Drift returns a forecast of length equal to the horizon.
'''
model = b.Drift()
model.fit(np.array(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_drift_fit_predict(data, horizon, expected):
'''
    test Drift fit_predict returns a forecast of length equal to the horizon.
'''
model = b.Drift()
# fit_predict for point forecasts only
preds = model.fit_predict(pd.Series(data), horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_average_forecast_horizon(data, horizon, expected):
'''
    test Average returns a forecast of length equal to the horizon.
'''
model = b.Average()
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_average_fit_predict(data, horizon, expected):
'''
    test Average fit_predict returns a forecast of length equal to the horizon.
'''
model = b.Average()
# fit_predict for point forecasts only
preds = model.fit_predict(pd.Series(data), horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_average_forecast_input_numpy(data, horizon, expected):
'''
test the average class accepts numpy array
'''
model = b.Average()
model.fit(np.array(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_average_forecast_input_series(data, horizon, expected):
'''
test the average class accepts pandas series
'''
model = b.Average()
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_average_forecast_input_dataframe(data, horizon, expected):
'''
    test the average baseline class accepts a dataframe
'''
model = b.Average()
model.fit(pd.DataFrame(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_naive1_forecast_input_dataframe(data, horizon, expected):
'''
    test Naive1 accepts a pandas DataFrame
'''
model = b.Naive1()
model.fit(pd.DataFrame(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_naive1_forecast_input_series(data, horizon, expected):
'''
    test Naive1 accepts a pandas Series
'''
model = b.Naive1()
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_naive1_forecast_input_numpy(data, horizon, expected):
'''
    test Naive1 accepts a numpy array
'''
model = b.Naive1()
model.fit(np.array(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_snaive_forecast_input_series(data, horizon, expected):
'''
    test SNaive accepts a pandas Series
'''
model = b.SNaive(1)
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_snaive_forecast_input_dataframe(data, horizon, expected):
'''
    test SNaive accepts a pandas DataFrame
'''
model = b.SNaive(1)
model.fit(pd.DataFrame(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_drift_forecast_input_numpy(data, horizon, expected):
'''
    test Drift accepts a numpy array
'''
model = b.Drift()
model.fit(np.array(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_drift_forecast_input_series(data, horizon, expected):
'''
    test Drift accepts a pandas Series
'''
model = b.Drift()
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_drift_forecast_input_dataframe(data, horizon, expected):
'''
    test Drift accepts a pandas DataFrame
'''
model = b.Drift()
model.fit(pd.DataFrame(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_ensemble_forecast_input_dataframe(data, horizon, expected):
'''
    test EnsembleNaive accepts a pandas DataFrame
'''
model = b.EnsembleNaive(2)
model.fit(pd.DataFrame(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_ensemble_forecast_input_series(data, horizon, expected):
'''
    test EnsembleNaive accepts a pandas Series
'''
model = b.EnsembleNaive(2)
model.fit(pd.Series(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, horizon, expected",
[([1, 2, 3, 4, 5], 12, 12),
([1, 2, 3, 4, 5], 24, 24),
([1, 2, 3], 8, 8)
])
def test_ensemble_forecast_input_numpy(data, horizon, expected):
'''
    test EnsembleNaive accepts a numpy array
'''
model = b.EnsembleNaive(2)
model.fit(np.array(data))
# point forecasts only
preds = model.predict(horizon)
assert len(preds) == expected
@pytest.mark.parametrize("data, exception",
[(np.array([]), ValueError),
(1.0, TypeError),
(np.array(['foo', 'bar', 'spam', 'eggs']),
TypeError),
(np.array([1, 2, 3, 4, 5, 6, np.NAN]), TypeError),
(np.array([1, 2, 3, 4, np.Inf, 5, 6]), TypeError)])
def test_ensemble_abnormal_input(data, exception):
'''
    test EnsembleNaive raises correct exceptions on abnormal input
'''
model = b.EnsembleNaive(2)
with pytest.raises(exception):
model.fit(data)
@pytest.mark.parametrize("data, expected",
[([1, 2, 3, 4, 5], 3.0),
([139, 32, 86, 123, 61, 51, 108,
137, 33, 25], 79.5),
([1, 2, 3], 2.0)
])
def test_average_forecast_output(data, expected):
'''
    test Average forecasts the mean of the training data
'''
model = b.Average()
model.fit(pd.DataFrame(data))
# point forecasts only
preds = model.predict(1)
assert preds[0] == expected
@pytest.mark.parametrize("data, expected",
[([1, 2, 3, 4, 5], 5.0),
([139, 32, 86, 123, 61, 51,
108, 137, 33, 25], 25.0),
([1, 2, 3], 3.0)
])
def test_naive1_forecast_output(data, expected):
'''
test naive1 carries forward the last value in the series
'''
model = b.Naive1()
model.fit(pd.DataFrame(data))
# point forecasts only
preds = model.predict(1)
assert preds[0] == expected
@pytest.mark.parametrize("data, period, expected",
[(np.resize(np.arange(12), 24), 12, np.arange(12)),
(np.resize(np.arange(24), 48), 24, np.arange(24)),
(pd.Series(np.resize(np.arange(12), 24)),
12, pd.Series(np.arange(12)))
])
def test_snaive_forecast_output(data, period, expected):
'''
    test SNaive carries forward the values from the previous seasonal period
'''
model = b.SNaive(period)
model.fit(data)
# point forecasts only
preds = model.predict(period)
assert np.array_equal(preds, expected)
@pytest.mark.parametrize("data, period, expected",
[(np.resize(np.arange(12), 24), 12, np.full(12,
np.arange(12).mean())),
(np.resize(np.arange(24), 48), 24,
np.full(24, np.arange(24).mean())),
(pd.Series(np.resize(np.arange(12), 24)),
12, np.full(12, np.arange(12).mean()))
])
def test_average_forecast_output_longer_horizon(data, period, expected):
'''
    test Average returns a flat forecast of the training mean over the horizon
'''
model = b.Average()
model.fit(data)
# point forecasts only
preds = model.predict(period)
assert np.array_equal(preds, expected)
@pytest.mark.parametrize("data, exception",
[(np.array([]), ValueError),
(1.0, TypeError),
(np.array(['foo', 'bar', 'spam', 'eggs']),
TypeError),
(np.array([1, 2, 3, 4, 5, 6, np.NAN]), TypeError),
(np.array([1, 2, 3, 4, np.Inf, 5, 6]), TypeError)])
def test_naive1_abnormal_input(data, exception):
'''
test naive1 raises correct exceptions on abnormal input
'''
model = b.Naive1()
with pytest.raises(exception):
model.fit(data)
@pytest.mark.parametrize("data, exception",
[(np.array([]), ValueError),
(1.0, TypeError),
(np.array(['foo', 'bar', 'spam', 'eggs']),
TypeError),
(np.array([1, 2, 3, 4, 5, 6, np.nan]), TypeError),
(np.array([1, 2, 3, 4, np.Inf, 5, 6]), TypeError)
])
def test_snaive_abnormal_input(data, exception):
'''
test snaive raises correct exceptions on abnormal input
'''
model = b.SNaive(1)
with pytest.raises(exception):
model.fit(data)
@pytest.mark.parametrize("data, exception",
[(np.array([]), ValueError),
(1.0, TypeError),
(np.array(['foo', 'bar', 'spam', 'eggs']),
TypeError),
(np.array([1, 2, 3, 4, 5, 6, np.nan]), TypeError),
(np.array([1, 2, 3, 4, np.Inf, 5, 6]), TypeError)])
def test_average_abnormal_input(data, exception):
'''
test average raises correct exceptions on abnormal input
'''
model = b.Average()
with pytest.raises(exception):
model.fit(data)
@pytest.mark.parametrize("data, exception",
[(np.array([]), ValueError),
(1.0, TypeError),
(np.array(['foo', 'bar', 'spam', 'eggs']),
TypeError),
(np.array([1, 2, 3, 4, 5, 6, np.nan]), TypeError),
(np.array([1, 2, 3, 4, np.Inf, 5, 6]), TypeError)])
def test_drift_abnormal_input(data, exception):
'''
test drift raises correct exceptions on abnormal input
'''
model = b.Drift()
with pytest.raises(exception):
model.fit(data)
@pytest.mark.parametrize("data, horizon, alpha, expected",
[([1, 2, 3, 4, 5], 12, [0.2, 0.05], 12),
([1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 24),
([1, 2, 3], 8, [0.8], 8)
])
def test_naive1_pi_horizon(data, horizon, alpha, expected):
'''
test the correct forecast horizon is returned for prediction interval
for Naive1
'''
model = b.Naive1()
model.fit(pd.Series(data))
# point forecasts only
_, intervals = model.predict(horizon, return_predict_int=True, alpha=alpha)
assert len(intervals[0]) == expected
@pytest.mark.parametrize("data, horizon, alpha, expected",
[([1, 2, 3, 4, 5], 12, [0.2, 0.05], 12),
([1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 24),
([1, 2, 3], 8, [0.8], 8)
])
def test_snaive_pi_horizon(data, horizon, alpha, expected):
'''
test the correct forecast horizon is returned for prediction
interval for SNaive
'''
model = b.SNaive(1)
model.fit(pd.Series(data))
# point forecasts only
_, intervals = model.predict(horizon, return_predict_int=True, alpha=alpha)
assert len(intervals[0]) == expected
@pytest.mark.parametrize("data, horizon, alpha, expected",
[([1, 2, 3, 4, 5], 12, [0.2, 0.05], 12),
([1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 24),
([1, 2, 3], 8, [0.8], 8)
])
def test_drift_pi_horizon(data, horizon, alpha, expected):
'''
test the correct forecast horizon is returned for prediction
interval for Drift
'''
model = b.Drift()
model.fit(pd.Series(data))
# point forecasts only
_, intervals = model.predict(
horizon, return_predict_int=True, alpha=alpha)
assert len(intervals[0]) == expected
@pytest.mark.parametrize("data, horizon, alpha, expected",
[([1, 2, 3, 4, 5], 12, [0.2, 0.05], 12),
([1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 24),
([1, 2, 3], 8, [0.8], 8)
])
def test_average_pi_horizon(data, horizon, alpha, expected):
'''
test the correct forecast horizon is returned for prediction
interval for Average
'''
model = b.Average()
model.fit(pd.Series(data))
# point forecasts only
_, intervals = model.predict(
horizon, return_predict_int=True, alpha=alpha)
assert len(intervals[0]) == expected
@pytest.mark.parametrize("model, data, horizon, alpha, expected",
[(b.Naive1(), [1, 2, 3, 4, 5], 12, [0.2, 0.05], 2),
(b.Naive1(), [1, 2, 3, 4, 5],
24, [0.2, 0.10, 0.05], 3),
(b.SNaive(1), [1, 2, 3], 8, [0.8], 1),
(b.SNaive(1), [1, 2, 3, 4, 5],
24, [0.2, 0.10, 0.05], 3),
(b.Naive1(), [1, 2, 3], 8, None, 2),
(b.SNaive(1), [1, 2, 3], 8, None, 2),
(b.Average(), [1, 2, 3], 8, None, 2),
(b.Drift(), [1, 2, 3], 8, None, 2),
(b.Drift(), [1, 2, 3], 8, [0.8], 1),
(b.Drift(), [1, 2, 3], 8, None, 2),
(b.Average(), [1, 2, 3, 4, 5],
24, [0.2, 0.10, 0.05], 3)
])
def test_naive_pi_set_number(model, data, horizon, alpha, expected):
'''
    test the correct number of prediction interval sets is returned
    for all of the baseline forecasting classes
'''
model.fit( | pd.Series(data) | pandas.Series |
import json
import sys
import pandas as pd
from pandas import DataFrame
from db.sql import dal
from flask import request
import tempfile
import tarfile
import csv
import shutil
import subprocess
from flask import send_from_directory
from annotation.main import T2WMLAnnotation
from db.sql.kgtk import import_kgtk_dataframe
from api.variable.delete import VariableDeleter
from api.metadata.main import DatasetMetadataResource, VariableMetadataResource
from api.metadata.metadata import DatasetMetadata
from api.metadata.update import DatasetMetadataUpdater
from annotation.validation.validate_annotation import ValidateAnnotation
from time import time
from datetime import datetime
from typing import Dict, List, Any, Union, NoReturn, Optional, Tuple
import traceback
class AnnotatedData(object):
def __init__(self):
self.ta = T2WMLAnnotation()
self.va = ValidateAnnotation()
self.vmr = VariableMetadataResource()
self.vd = VariableDeleter()
def process(self, dataset, is_request_put=False):
l = time()
validate = request.args.get('validate', 'true').lower() == 'true'
files_only = request.args.get('files_only', 'false').lower() == 'true'
create_if_not_exist = request.args.get('create_if_not_exist', 'false').lower() == 'true'
return_tsv = request.args.get('tsv', 'false').lower() == 'true'
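        # Descriptive summary of the query-string flags parsed above (inferred from
        # their use later in this method):
        #   validate            - run annotation validation and return errors before processing
        #   files_only          - only return the generated t2wml annotation files (tar.gz),
        #                         without importing any data
        #   create_if_not_exist - create dataset metadata from the annotation sheet if the
        #                         dataset does not already exist
        #   tsv                 - import the data and also write exploded KGTK TSV files for
        #                         the dataset and its metadata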
# check if the dataset exists
s = time()
dataset_qnode = dal.get_dataset_id(dataset)
print(f'time take to get dataset: {time() - s} seconds')
if not create_if_not_exist and not dataset_qnode:
print(f'Dataset not defined: {dataset}')
return {'Error': 'Dataset not found: {}'.format(dataset)}, 404
file_name = request.files['file'].filename
t2wml_yaml, metadata_edges = None, None
if 't2wml_yaml' in request.files:
request.files['t2wml_yaml'].seek(0)
t2wml_yaml = str(request.files['t2wml_yaml'].read(), 'utf-8')
if not (file_name.endswith('.xlsx') or file_name.endswith('.csv')):
return {"Error": "Please upload an annotated excel file or a csv file "
"(file name ending with .xlsx or .csv)"}, 400
if file_name.endswith('.xlsx'):
df = pd.read_excel(request.files['file'], dtype=object, header=None).fillna('')
elif file_name.endswith('.csv'):
df = pd.read_csv(request.files['file'], dtype=object, header=None).fillna('')
if dataset_qnode:
# only update metadata if we are going to insert data, if the request is only to return files, skip
if not files_only:
# update dataset metadata last_updated field
DatasetMetadataUpdater().update(dataset)
else:
try:
dataset_dict = {
'dataset_id': df.iloc[0, 1],
'name': df.iloc[0, 2],
'description': df.iloc[0, 3],
'url': df.iloc[0, 4]
}
except IndexError as e:
                return {'Error': 'Dataset does not exist and metadata is not available to create it.'}, 400
missing = []
for key, value in dataset_dict.items():
if not value:
missing.append(key)
if len(missing) > 0:
print(f'Dataset metadata missing fields: {missing}')
return {'Error': f'Dataset metadata missing fields: {missing}'}, 404
metadata = DatasetMetadata()
metadata.from_dict(dataset_dict)
dataset_qnode, metadata_edges = DatasetMetadataUpdater().create_dataset(metadata)
s = time()
validation_report, valid_annotated_file, rename_columns = self.va.validate(dataset, df=df)
print(f'time take to validate annotated file: {time() - s} seconds')
if validate:
if not valid_annotated_file:
return json.loads(validation_report), 400
if files_only:
t2wml_yaml, combined_item_def_df, consolidated_wikifier_df = self.ta.process(dataset_qnode, df,
rename_columns,
extra_files=True,
t2wml_yaml=t2wml_yaml)
temp_tar_dir = tempfile.mkdtemp()
open(f'{temp_tar_dir}/t2wml.yaml', 'w').write(t2wml_yaml)
combined_item_def_df.to_csv(f'{temp_tar_dir}/item_definitions_all.tsv', sep='\t', index=False)
consolidated_wikifier_df.to_csv(f'{temp_tar_dir}/consolidated_wikifier.csv', index=False)
with tarfile.open(f'{temp_tar_dir}/t2wml_annotation_files.tar.gz', "w:gz") as tar:
tar.add(temp_tar_dir, arcname='.')
try:
return send_from_directory(temp_tar_dir, 't2wml_annotation_files.tar.gz')
finally:
shutil.rmtree(temp_tar_dir)
elif return_tsv:
variable_ids, kgtk_exploded_df = self.generate_kgtk_dataset(dataset, dataset_qnode, df, rename_columns, t2wml_yaml, is_request_put)
self.import_to_database(kgtk_exploded_df)
temp_tar_dir = tempfile.mkdtemp()
# Generate dataset kgtk file
            dataset_path = f'{temp_tar_dir}/{dataset}-dataset-exploded.tsv'
kgtk_exploded_df.to_csv(dataset_path, index=None, sep='\t', quoting=csv.QUOTE_NONE, quotechar='')
# Generate dataset metadata kgtk file and explode it
            metadata_path = f'{temp_tar_dir}/{dataset}-metadata-exploded.tsv'
if metadata_edges is None:
metadata_edges = self.generate_kgtk_dataset_metadata(dataset)
metadata_df = | pd.DataFrame(metadata_edges) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
        self.assertFalse((cat > cat).any())
        def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
# all "pointers" to '4' must be changed from 3 to 0, ...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
# categories are now in new order
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
# output is the same
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
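# rename_categories relabels the categories but keeps the codes;
# the replacement list must have the same length as the old categories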
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories of the result are in order of appearance when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
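# mode of an ordered Categorical: ties keep every mode, and NaN is
# never counted as a mode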
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
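# assigning np.nan stores code -1 unless NaN is itself a category,
# in which case the code of the NaN category is used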
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
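# nbytes is the size of the codes buffer plus the categories buffer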
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
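# memory_usage matches nbytes; deep=True only adds to it when the
# categories are object dtype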
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
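# every result below is checked against Series.searchsorted on the same data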
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
# eggs after donuts, after switching milk and donuts
exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
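# the codes dtype scales with the number of categories: int8 for a
# handful, int16 for hundreds, int32 for tens of thousands, and it
# shrinks again when categories are removed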
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
# doing this breaks transform
x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
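# astype('category') should give the same result as wrapping the
# values in Categorical directly, for Series and DataFrame columns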
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
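# Series(values, dtype='category') is equivalent to
# Series(values).astype('category')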
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')),
                pd.Categorical(list('abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
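# unstacking a categorical column should keep dtype 'category' in
# each of the resulting columns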
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
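# the .cat accessor exposes categories/ordered and delegates
# set_categories / remove_unused_categories to the Categorical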
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"],
                       categories=["a", "b", "c"]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
        # test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
        tm.assert_frame_equal(df, exp_parts_cats_col)
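# Standalone sketch (not part of the test class above): a minimal, hedged
# illustration of the assignment behaviour exercised by test_assigning_ops.
# Values written into a categorical column must already be among its
# categories; pandas rejects anything else (ValueError in the pandas version
# these tests target, TypeError in newer releases).
import pandas as pd

demo = pd.DataFrame({"cats": pd.Categorical(["a"] * 4, categories=["a", "b"]),
                     "values": [1, 1, 1, 1]})
demo.iloc[1:3, 0] = ["b", "b"]          # fine: "b" is an existing category
try:
    demo.iloc[1:3, 0] = ["c", "c"]      # rejected: "c" is not a category
except (ValueError, TypeError) as err:
    print("rejected:", err)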
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from os.path import join
import rasterio
import pandas as pd
import rtree
import geopandas as gp
from shapely.geometry import Point
# local
from cmftools.cmf_maps_analyze import get_outlets
def sjoin_nn(points1, points2, max_dist=np.inf, prefix='p2'):
"""function for spatial join of the nearest neighbour from points2 to points1
:param points1: geopandas GeoDataFrame with point geometry
:param points2: geopandas GeoDataFrame with point geometry
:param max_dist: float. maximum distance between two nn points. in km if latlon
:param prefix: string used as prefix for metadata from points2
:param latlon: if True (calculate distance on unit sphere in lat lon coordinates)"""
assert points1.crs == points2.crs, 'the point geometries should have the same crs'
points1 = points1.copy()
distf = latlon_distance
x_col, y_col = prefix + '_lon', prefix + '_lat'
# build spatial rtree index of points2
tree_idx = rtree.index.Index()
for i, geom in enumerate(points2.geometry):
tree_idx.insert(i, geom.coords[:][0])
# create new columns for output
dist_col, idx_col = prefix + '_dist', prefix + '_idx'
    result = pd.DataFrame(columns=[x_col, y_col, dist_col, idx_col], index=points1.index)
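# Standalone usage sketch: the body of sjoin_nn is truncated above and
# `latlon_distance` is assumed to be provided elsewhere in cmftools, so this
# only illustrates the intended call pattern with two point GeoDataFrames in
# the same CRS (all names and coordinates here are made up for illustration).
import geopandas as gp
from shapely.geometry import Point

gauges = gp.GeoDataFrame({'name': ['g1', 'g2']},
                         geometry=[Point(4.50, 52.00), Point(5.10, 52.40)],
                         crs='EPSG:4326')
outlets = gp.GeoDataFrame({'cell': [10, 11]},
                          geometry=[Point(4.52, 52.01), Point(5.08, 52.39)],
                          crs='EPSG:4326')
# joined = sjoin_nn(gauges, outlets, max_dist=5, prefix='out')
# -> `gauges` with extra columns out_lon, out_lat, out_dist and out_idx
#    describing the nearest outlet within 5 km of each gauge.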
# -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import pandas as pd
from app import app
import urllib.parse
import json
# Get data
filename = 'assets/auditory-cortex_review_included_studies_procsteps.txt'
df_studies = pd.read_csv(filename, sep='\t', lineterminator='\r')
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
        }).equals(pandas.Series([True, True, True])))
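# Standalone sketch (not part of the test class above). Per the tests,
# DataframeType wraps a DataFrame and its operators return a boolean Series
# with one result per row; a string comparator naming an existing column is
# compared column-to-column, otherwise it is treated as a literal (and
# value_is_literal=True forces literal comparison).
import pandas
from business_rules.operators import DataframeType

demo_df = pandas.DataFrame({"var1": [1, 2, 4], "var2": [3, 2, 6]})
mask = DataframeType({"value": demo_df}).equal_to(
    {"target": "var1", "comparator": "var2"})
print(mask.tolist())  # [False, True, False]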
# -*- coding: utf-8 -*-
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import date_range
try:
import pandas.tseries.holiday
except ImportError:
pass
hcal = pd.tseries.holiday.USFederalHolidayCalendar()
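# The calendar above is typically paired with a CustomBusinessDay offset. The
# original file is truncated here, so the lines below are an assumed
# continuation for illustration, not the author's code:
bday_us = pd.tseries.offsets.CustomBusinessDay(calendar=hcal)
rng = date_range('2011-01-01', '2011-12-31', freq=bday_us)
print(len(rng))  # US business days in 2011, excluding federal holidays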
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operation classes for Meterstick."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from typing import Any, Iterable, List, Optional, Sequence, Text, Tuple, Union
import warnings
from meterstick import confidence_interval_display
from meterstick import metrics
from meterstick import sql
from meterstick import utils
import numpy as np
import pandas as pd
from scipy import stats
class Operation(metrics.Metric):
"""A special kind of Metric that operates on other Metric instance(s).
The differences between Metric and Operation are
1. Operation must take other Metric(s) as the children to operate on.
  2. The name of an Operation is reflected in the result differently. A Metric
    usually returns 1D data, so its name can simply be used as the column name.
    An Operation, however, often operates on a MetricList where one name
    doesn't fit all, so we apply the Operation's name_tmpl to every child
    Metric's name.
Attributes:
name: Name of the Metric.
name_tmpl: The template to generate the name from child Metrics' names.
children: A Length-1 tuple of the child Metric(s) whose results will be the
input to the Operation. Might be None in __init__, but must be assigned
before compute().
extra_index: Many Operations rely on adding extra split_by columns to child
Metric. For example,
PercentChange('condition', base_value, Sum('X')).compute_on(df, 'grp')
would compute Sum('X').compute_on(df, ['grp', 'condition']) then get the
change. As the result, the CacheKey used in PercentChange is different to
that used in Sum('X'). The latter has more columns in the split_by.
extra_index records what columns need to be added to children Metrics so
we can flush the cache correctly. The convention is extra_index comes
after split_by. If not, you need to overwrite flush_children().
precomputable_in_jk: Indicates whether it is possible to cut corners to
obtain leave-one-out (LOO) estimates for the Jackknife. This attribute is
True if the input df is only used in compute_on() and compute_child().
This is necessary because Jackknife emits None as the input df for LOO
estimation when cutting corners. The compute_on() and compute_child()
functions know to read from cache but other functions may not know what to
do. If your Operation uses df outside the compute_on() and compute_child()
      functions, you must either
      1. ensure that your computation doesn't break when df is None, or
      2. set the attribute 'precomputable_in_jk' to False (which forces the
        jackknife to be computed the manual, slower way).
precompute_child: Many Operations first compute the child Metric with extra
split_by columns, then perform computation on the child Metric' result. If
precompute_child is True, in the precompute(), we return
self.children[0].compute_on(df, split_by + self.extra_index). Otherwise
the original input data is returned.
where: A string or list of strings to be concatenated that will be passed to
df.query() as a prefilter.
cache_key: What key to use to cache the df. You can use anything that can be
a key of a dict except '_RESERVED' and tuples like ('_RESERVED', ...).
And all other attributes inherited from Metric.
"""
def __init__(self,
child: Optional[metrics.Metric] = None,
name_tmpl: Optional[Text] = None,
extra_index: Optional[Union[Text, Iterable[Text]]] = None,
name: Optional[Text] = None,
where: Optional[Union[Text, Sequence[Text]]] = None,
**kwargs):
if name_tmpl and not name:
name = name_tmpl.format(utils.get_name(child))
super(Operation, self).__init__(name, child or (), where, name_tmpl,
**kwargs)
self.extra_index = [extra_index] if isinstance(extra_index,
str) else extra_index or []
self.precomputable_in_jk = True
self.precompute_child = True
self.apply_name_tmpl = True
def split_data(self, df, split_by=None):
"""Splits the DataFrame returned by the children if it's computed."""
if not self.precompute_child or not split_by:
for i in super(Operation, self).split_data(df, split_by):
yield i
else:
keys, indices = zip(*df.groupby(split_by).groups.items())
for k, idx in zip(keys, indices):
yield df.loc[idx.unique()].droplevel(split_by), k
def compute_children(self,
df: pd.DataFrame,
split_by=None,
melted=False,
return_dataframe=True,
cache_key=None):
return self.compute_child(df, split_by, melted, return_dataframe, cache_key)
def compute_child(self,
df: pd.DataFrame,
split_by=None,
melted=False,
return_dataframe=True,
cache_key=None):
child = self.children[0]
cache_key = cache_key or self.cache_key or self.RESERVED_KEY
cache_key = self.wrap_cache_key(cache_key, split_by)
return child.compute_on(df, split_by, melted, return_dataframe, cache_key)
def compute_on_sql_mixed_mode(self, table, split_by, execute, mode=None):
res = super(Operation,
self).compute_on_sql_mixed_mode(table, split_by, execute, mode)
return utils.apply_name_tmpl(self.name_tmpl, res)
def flush_children(self,
key=None,
split_by=None,
where=None,
recursive=True,
prune=True):
split_by = (split_by or []) + self.extra_index
super(Operation, self).flush_children(key, split_by, where, recursive,
prune)
def __call__(self, child: metrics.Metric):
op = copy.deepcopy(self) if self.children else self
op.name = op.name_tmpl.format(utils.get_name(child))
op.children = (child,)
op.cache = {}
return op
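# A hedged usage sketch (illustrative, not part of the original source): an
# Operation can be constructed without a child and attached to one later via
# __call__ above. Roughly:
#
#   change = PercentChange('condition', 'control')    # no child yet
#   change_on_sum = change(metrics.Sum('X'))           # __call__ attaches the child
#   # change_on_sum.name is now name_tmpl.format('sum(X)')
#
# Because __call__ deep-copies self when a child is already attached, the same
# template Operation can be applied to several Metrics without sharing state.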
class Distribution(Operation):
"""Computes the normalized values of a Metric over column(s).
Attributes:
extra_index: A list of column(s) to normalize over.
children: A tuple of a Metric whose result we normalize on.
And all other attributes inherited from Operation.
"""
def __init__(self,
over: Union[Text, List[Text]],
child: Optional[metrics.Metric] = None,
**kwargs):
super(Distribution, self).__init__(child, 'Distribution of {}', over,
**kwargs)
def compute_on_children(self, child, split_by):
total = child.groupby(level=split_by).sum() if split_by else child.sum()
return child / total
def get_sql_and_with_clause(self, table, split_by, global_filter, indexes,
local_filter, with_data):
"""Gets the SQL query and WITH clause.
The query is constructed by
1. Get the query for the child metric.
2. Keep all indexing/groupby columns unchanged.
3. For all value columns, get
value / SUM(value) OVER (PARTITION BY split_by).
Args:
table: The table we want to query from.
split_by: The columns that we use to split the data.
global_filter: The sql.Filters that can be applied to the whole Metric
tree.
indexes: The columns that we shouldn't apply any arithmetic operation.
local_filter: The sql.Filters that have been accumulated so far.
with_data: A global variable that contains all the WITH clauses we need.
Returns:
The SQL instance for metric, without the WITH clause component.
The global with_data which holds all datasources we need in the WITH
clause.
"""
local_filter = sql.Filters([self.where, local_filter]).remove(global_filter)
child_sql, with_data = self.children[0].get_sql_and_with_clause(
table, indexes, global_filter, indexes, local_filter, with_data)
child_table = sql.Datasource(child_sql, 'DistributionRaw')
child_table_alias, rename = with_data.merge(child_table)
groupby = sql.Columns(indexes.aliases, distinct=True)
columns = sql.Columns()
for c in child_sql.columns:
if c.alias in groupby:
continue
alias = rename.get(c.alias, c.alias)
col = sql.Column(alias) / sql.Column(
alias, 'SUM({})', partition=split_by.aliases)
col.set_alias('Distribution of %s' % c.alias_raw)
columns.add(col)
return sql.Sql(groupby.add(columns), child_table_alias), with_data
Normalize = Distribution # An alias.
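# A minimal sketch of what Distribution computes, on a toy DataFrame
# (names and values below are illustrative, not from the original source):
#
#   df = pd.DataFrame({'grp': ['A', 'A', 'B'], 'X': [1, 3, 6]})
#   Distribution('grp', metrics.Sum('X')).compute_on(df)
#   # sum(X) is 4 for grp A and 6 for grp B, so the normalized values are
#   # 0.4 and 0.6 -- exactly child / total as in compute_on_children() above.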
class CumulativeDistribution(Operation):
"""Computes the normalized cumulative sum.
Attributes:
extra_index: A list of column(s) to normalize over.
children: A tuple of a Metric whose result we compute the cumulative
distribution on.
order: An iterable. The over column will be ordered by it before computing
cumsum.
ascending: Sort ascending or descending.
And all other attributes inherited from Operation.
"""
def __init__(self,
over: Text,
child: Optional[metrics.Metric] = None,
order=None,
ascending: bool = True,
**kwargs):
self.order = order
self.ascending = ascending
super(CumulativeDistribution,
self).__init__(child, 'Cumulative Distribution of {}', over, **kwargs)
def compute(self, df):
if self.order:
df = pd.concat((
df.loc[[o]] for o in self.order if o in df.index.get_level_values(0)))
else:
df.sort_values(self.extra_index, ascending=self.ascending, inplace=True)
dist = df.cumsum()
dist /= df.sum()
return dist
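  # A hedged sketch of compute() above (toy values): if the child result,
  # indexed by the `over` column, is {'A': 0.4, 'B': 0.6}, then
  # df.cumsum() / df.sum() yields 0.4 and 1.0, i.e. the cumulative share after
  # sorting by the `over` column (or by `order` if one was provided).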
def get_sql_and_with_clause(self, table, split_by, global_filter, indexes,
local_filter, with_data):
"""Gets the SQL query and WITH clause.
The query is constructed by
1. Get the query for the Distribution of the child Metric.
2. Keep all indexing/groupby columns unchanged.
3. For all value columns, get the cumulative sum by summing over
'ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW'.
Args:
table: The table we want to query from.
split_by: The columns that we use to split the data.
global_filter: The sql.Filters that can be applied to the whole Metric
tree.
indexes: The columns that we shouldn't apply any arithmetic operation.
local_filter: The sql.Filters that have been accumulated so far.
with_data: A global variable that contains all the WITH clauses we need.
Returns:
The SQL instance for metric, without the WITH clause component.
The global with_data which holds all datasources we need in the WITH
clause.
"""
local_filter = sql.Filters([self.where, local_filter]).remove(global_filter)
util_metric = Distribution(self.extra_index, self.children[0])
child_sql, with_data = util_metric.get_sql_and_with_clause(
table, split_by, global_filter, indexes, local_filter, with_data)
child_table = sql.Datasource(child_sql, 'CumulativeDistributionRaw')
child_table_alias, rename = with_data.merge(child_table)
columns = sql.Columns(indexes.aliases)
order = list(metrics.get_extra_idx(self))
order[0] = sql.Column(
_get_order_for_cum_dist(sql.Column(order[0]).alias, self),
auto_alias=False)
for c in child_sql.columns:
if c in columns:
continue
col = sql.Column(
rename.get(c.alias, c.alias),
'SUM({})',
partition=split_by.aliases,
order=order,
window_frame='ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW')
col.set_alias('Cumulative %s' % c.alias_raw)
columns.add(col)
return sql.Sql(columns, child_table_alias), with_data
def _get_order_for_cum_dist(over, metric):
if metric.order:
over = 'CASE %s\n' % over
tmpl = 'WHEN %s THEN %s'
over += '\n'.join(
tmpl % (_format_to_condition(o), i) for i, o in enumerate(metric.order))
over += '\nELSE %s\nEND' % len(metric.order)
return over if metric.ascending else over + ' DESC'
def _format_to_condition(val):
if isinstance(val, str) and not val.startswith('$'):
return '"%s"' % val
return '%s' % val
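# A hedged sketch of the two helpers above (values illustrative):
#
#   _format_to_condition('US')   -> '"US"'   (plain strings get quoted)
#   _format_to_condition(3)      -> '3'      (non-strings pass through)
#   _format_to_condition('$foo') -> '$foo'   ('$'-prefixed values are left as-is)
#
# and, with metric.order = ['US', 'CA'], _get_order_for_cum_dist('country',
# metric) returns roughly
#   CASE country
#   WHEN "US" THEN 0
#   WHEN "CA" THEN 1
#   ELSE 2
#   END
# with a trailing ' DESC' appended when metric.ascending is False.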
class Comparison(Operation):
"""Base class for comparisons like percent/absolute change."""
def __init__(self,
condition_column,
baseline_key,
child: Optional[metrics.Metric] = None,
include_base: bool = False,
name_tmpl: Optional[Text] = None,
**kwargs):
self.baseline_key = baseline_key
self.include_base = include_base
super(Comparison, self).__init__(child, name_tmpl, condition_column,
**kwargs)
def get_sql_and_with_clause(self, table, split_by, global_filter, indexes,
local_filter, with_data):
"""Gets the SQL for PercentChange or AbsoluteChange.
The query is constructed by
1. Get the query for the child metric and add it to with_data, we call it
raw_value_table.
2. Query the rows that only has the base value from raw_value_table, add it
to with_data too. We call it base_value_table.
3. sql.Join the two tables and computes the change for all value columns.
For example, the query for
AbsoluteChange('condition', 'base_value', metrics.Mean('click'))
will look like this:
WITH
ChangeRaw AS (SELECT
split_by,
condition,
AVG(click) AS `mean(click)`
FROM $DATA
GROUP BY split_by, condition),
ChangeBase AS (SELECT
split_by,
`mean(click)`
FROM ChangeRaw
WHERE
condition = "base_value")
SELECT
split_by,
condition,
ChangeRaw.`mean(click)` - ChangeBase.`mean(click)`
AS `mean(click) Absolute Change`
FROM ChangeRaw
JOIN
ChangeBase
USING (split_by)
WHERE
condition != "base_value"
Args:
table: The table we want to query from.
split_by: The columns that we use to split the data.
global_filter: The sql.Filters that can be applied to the whole Metric
tree.
indexes: The columns that we shouldn't apply any arithmetic operation.
local_filter: The sql.Filters that have been accumulated so far.
with_data: A global variable that contains all the WITH clauses we need.
Returns:
The SQL instance for metric, without the WITH clause component.
The global with_data which holds all datasources we need in the WITH
clause.
"""
if not isinstance(self, (PercentChange, AbsoluteChange)):
raise ValueError('Not a PercentChange nor AbsoluteChange!')
local_filter = sql.Filters([self.where, local_filter]).remove(global_filter)
child = self.children[0]
cond_cols = sql.Columns(self.extra_index)
groupby = sql.Columns(split_by).add(cond_cols)
alias_tmpl = self.name_tmpl
raw_table_sql, with_data = child.get_sql_and_with_clause(
table, groupby, global_filter, indexes, local_filter, with_data)
raw_table = sql.Datasource(raw_table_sql, 'ChangeRaw')
raw_table_alias, rename = with_data.merge(raw_table)
base = self.baseline_key if isinstance(self.baseline_key,
tuple) else [self.baseline_key]
base_cond = ('%s = %s' % (c, _format_to_condition(b))
for c, b in zip(cond_cols.aliases, base))
base_cond = ' AND '.join(base_cond)
cols = sql.Columns(raw_table_sql.groupby.aliases)
cols.add((rename.get(a, a) for a in raw_table_sql.columns.aliases))
base_value = sql.Sql(
cols.difference(cond_cols.aliases), raw_table_alias, base_cond)
base_table = sql.Datasource(base_value, 'ChangeBase')
base_table_alias, rename = with_data.merge(base_table)
exclude_base_condition = ('%s != %s' % (c, _format_to_condition(b))
for c, b in zip(cond_cols.aliases, base))
exclude_base_condition = ' OR '.join(exclude_base_condition)
cond = None if self.include_base else sql.Filters([exclude_base_condition])
col_tmp = '%s.{r} - %s.{b}' if isinstance(
self, AbsoluteChange) else 'SAFE_DIVIDE(%s.{r}, (%s.{b})) * 100 - 100'
columns = sql.Columns()
for c in raw_table_sql.columns.difference(indexes.aliases):
raw_table_col = rename.get(c.alias, c.alias)
base_table_col = rename.get(c.alias, c.alias)
col = sql.Column(
col_tmp.format(r=raw_table_col, b=base_table_col) %
(raw_table_alias, base_table_alias),
alias=alias_tmpl.format(c.alias_raw))
columns.add(col)
using = indexes.difference(cond_cols)
join = '' if using else 'CROSS'
return sql.Sql(
sql.Columns(indexes.aliases).add(columns),
sql.Join(raw_table_alias, base_table_alias, join=join, using=using),
cond), with_data
class PercentChange(Comparison):
"""Percent change estimator on a Metric.
Attributes:
extra_index: The column(s) that contains the conditions.
baseline_key: The value of the condition that represents the baseline (e.g.,
"Control"). All conditions will be compared to this baseline. If
condition_column contains multiple columns, then baseline_key should be a
tuple.
children: A tuple of a Metric whose result we compute percentage change on.
include_base: A boolean for whether the baseline condition should be
included in the output.
And all other attributes inherited from Operation.
"""
def __init__(self,
condition_column: Text,
baseline_key,
child: Optional[metrics.Metric] = None,
include_base: bool = False,
**kwargs):
super(PercentChange,
self).__init__(condition_column, baseline_key, child, include_base,
'{} Percent Change', **kwargs)
def compute_on_children(self, child, split_by):
level = None
if split_by:
level = self.extra_index[0] if len(
self.extra_index) == 1 else self.extra_index
res = (child / child.xs(self.baseline_key, level=level) - 1) * 100
if not self.include_base:
to_drop = [i for i in res.index.names if i not in self.extra_index]
idx_to_match = res.index.droplevel(to_drop) if to_drop else res.index
res = res[~idx_to_match.isin([self.baseline_key])]
return res
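# A hedged numeric sketch of PercentChange (toy values, not from the source):
#
#   df = pd.DataFrame({'condition': ['ctrl', 'expt'], 'X': [10, 12]})
#   PercentChange('condition', 'ctrl', metrics.Sum('X')).compute_on(df)
#   # yields (12 / 10 - 1) * 100 = 20 for 'expt'; the 'ctrl' row is dropped
#   # because include_base defaults to False.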
class AbsoluteChange(Comparison):
"""Absolute change estimator on a Metric.
Attributes:
extra_index: The column(s) that contains the conditions.
baseline_key: The value of the condition that represents the baseline (e.g.,
"Control"). All conditions will be compared to this baseline. If
condition_column contains multiple columns, then baseline_key should be a
tuple.
children: A tuple of a Metric whose result we compute absolute change on.
include_base: A boolean for whether the baseline condition should be
included in the output.
And all other attributes inherited from Operation.
"""
def __init__(self,
condition_column: Text,
baseline_key,
child: Optional[metrics.Metric] = None,
include_base: bool = False,
**kwargs):
super(AbsoluteChange,
self).__init__(condition_column, baseline_key, child, include_base,
'{} Absolute Change', **kwargs)
def compute_on_children(self, child, split_by):
level = None
if split_by:
level = self.extra_index[0] if len(
self.extra_index) == 1 else self.extra_index
# Don't use "-=". For multiindex it might go wrong. The reason is DataFrame
# has different implementations for __sub__ and __isub__. ___isub__ tries
# to reindex to update in place which sometimes lead to lots of NAs.
res = child - child.xs(self.baseline_key, level=level)
if not self.include_base:
to_drop = [i for i in res.index.names if i not in self.extra_index]
idx_to_match = res.index.droplevel(to_drop) if to_drop else res.index
res = res[~idx_to_match.isin([self.baseline_key])]
return res
class MH(Comparison):
"""Cochran-Mantel-Haenszel statistics estimator on a Metric.
MH only takes a ratio of two single-column Metrics, or a MetricList of such
ratios.
So AbsoluteChange(MetricList([a, b])) / AbsoluteChange(MetricList([c, d]))
won't work. Instead please use
MetricList([AbsoluteChange(a) / AbsoluteChange(c),
AbsoluteChange(b) / AbsoluteChange(d)]).
Attributes:
extra_index: The column(s) that contains the conditions.
baseline_key: The value of the condition that represents the baseline (e.g.,
"Control"). All conditions will be compared to this baseline. If
condition_column contains multiple columns, then baseline_key should be a
tuple.
stratified_by: The stratification column(s) in the DataFrame.
children: A tuple of a Metric whose result we compute the MH on.
include_base: A boolean for whether the baseline condition should be
included in the output.
And all other attributes inherited from Operation.
"""
def __init__(self,
condition_column: Text,
baseline_key: Text,
stratified_by: Union[Text, List[Text]],
metric: Optional[metrics.Metric] = None,
include_base: bool = False,
**kwargs):
self.stratified_by = stratified_by if isinstance(stratified_by,
list) else [stratified_by]
super(MH, self).__init__(condition_column, baseline_key, metric,
include_base, '{} MH Ratio', **kwargs)
def check_is_ratio(self, metric=None):
metric = metric or self.children[0]
if isinstance(metric, metrics.MetricList):
for m in metric:
self.check_is_ratio(m)
else:
if not isinstance(metric,
metrics.CompositeMetric) or metric.op(2.0, 2) != 1:
raise ValueError('MH only makes sense on ratio Metrics.')
def compute_children(self,
df: pd.DataFrame,
split_by=None,
melted=False,
return_dataframe=True,
cache_key=None):
self.check_is_ratio()
child = self.children[0]
cache_key = cache_key or self.cache_key or self.RESERVED_KEY
cache_key = self.wrap_cache_key(cache_key, split_by + self.stratified_by)
if isinstance(child, metrics.MetricList):
children = []
for m in child.children:
util_metric = metrics.MetricList(m.children)
children.append(
util_metric.compute_on(
df, split_by + self.stratified_by, cache_key=cache_key))
return children
util_metric = metrics.MetricList(child.children)
return util_metric.compute_on(
df, split_by + self.stratified_by, cache_key=cache_key)
def compute_on_children(self, children, split_by):
child = self.children[0]
if isinstance(child, metrics.MetricList):
res = [
self.compute_mh(c, d, split_by)
for c, d in zip(child.children, children)
]
return pd.concat(res, 1, sort=False)
return self.compute_mh(child, children, split_by)
def compute_mh(self, child, df_all, split_by):
"""Computes MH statistics for one Metric."""
level = self.extra_index[0] if len(
self.extra_index) == 1 else self.extra_index
df_baseline = df_all.xs(self.baseline_key, level=level)
suffix = '_base'
numer = child.children[0].name
denom = child.children[1].name
df_mh = df_all.join(df_baseline, rsuffix=suffix)
ka, na = df_mh[numer], df_mh[denom]
kb, nb = df_mh[numer + suffix], df_mh[denom + suffix]
weights = 1. / (na + nb)
to_split = [i for i in ka.index.names if i not in self.stratified_by]
res = ((ka * nb * weights).groupby(to_split).sum() /
(kb * na * weights).groupby(to_split).sum() - 1) * 100
res.name = child.name
to_split = [i for i in to_split if i not in self.extra_index]
if to_split:
split_by = split_by or []
extra_idx = [i for i in to_split if i not in split_by]
res = res.reorder_levels(split_by + self.extra_index + extra_idx)
if not self.include_base:
to_drop = [i for i in res.index.names if i not in self.extra_index]
idx_to_match = res.index.droplevel(to_drop) if to_drop else res.index
res = res[~idx_to_match.isin([self.baseline_key])]
return pd.DataFrame(res.sort_index(level=split_by + self.extra_index))
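  # A hedged reading of compute_mh() above, in formula form: with numerator k
  # and denominator n per stratum s, and the baseline marked with a prime,
  #   MH ratio = 100 * (sum_s k_s * n'_s / (n_s + n'_s)) /
  #                    (sum_s k'_s * n_s / (n_s + n'_s)) - 100
  # which is exactly what the two weighted groupby sums compute, using
  # weights = 1 / (na + nb).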
def compute_children_sql(self, table, split_by, execute, mode=None):
self.check_is_ratio()
child = self.children[0]
if isinstance(child, metrics.MetricList):
children = []
for m in child.children:
util_metric = metrics.MetricList(m.children)
c = util_metric.compute_on_sql(
table,
split_by + self.extra_index + self.stratified_by,
execute,
mode=mode)
children.append(c)
return children
util_metric = metrics.MetricList(child.children)
return util_metric.compute_on_sql(
table,
split_by + self.extra_index + self.stratified_by,
execute,
mode=mode)
def flush_children(self,
key=None,
split_by=None,
where=None,
recursive=True,
prune=True):
"""Flushes the grandchildren as child is not computed."""
split_by = (split_by or []) + self.extra_index + self.stratified_by
if isinstance(self.children[0], metrics.MetricList):
for c in self.children[0]:
c.flush_children(key, split_by, where, recursive, prune)
else:
self.children[0].flush_children(key, split_by, where, recursive, prune)
def get_sql_and_with_clause(self, table, split_by, global_filter, indexes,
local_filter, with_data):
"""Gets the SQL query and WITH clause.
The query is constructed in a similar way to AbsoluteChange except that we
apply weights to adjust the change.
For example, the query for
MH('condition', 'base_value', 'stratified',
metrics.Ratio('click', 'impression', 'ctr'))
will look like this:
WITH
MHRaw AS (SELECT
split_by,
condition,
stratified,
SUM(click) AS `sum(click)`,
SUM(impression) AS `sum(impression)`
FROM $DATA
GROUP BY split_by, condition, stratified),
MHBase AS (SELECT
split_by,
stratified,
`sum(click)`,
`sum(impression)`
FROM MHRaw
WHERE
condition = "base_value")
SELECT
split_by,
condition,
100 * SAFE_DIVIDE(
COALESCE(SUM(SAFE_DIVIDE(MHRaw.`sum(click)` * MHBase.`sum(impression)`,
MHBase.`sum(impression)` + MHRaw.`sum(impression)`)), 0),
COALESCE(SUM(SAFE_DIVIDE(MHBase.`sum(click)` * MHRaw.`sum(impression)`,
MHBase.`sum(impression)` + MHRaw.`sum(impression)`)), 0)) - 100
AS `ctr MH Ratio`
FROM MHRaw
JOIN
MHBase
USING (split_by, stratified)
WHERE
condition != "base_value"
GROUP BY split_by, condition
Args:
table: The table we want to query from.
split_by: The columns that we use to split the data.
global_filter: The sql.Filters that can be applied to the whole Metric
tree.
indexes: The columns that we shouldn't apply any arithmetic operation.
local_filter: The sql.Filters that have been accumulated so far.
with_data: A global variable that contains all the WITH clauses we need.
Returns:
The SQL instance for metric, without the WITH clause component.
The global with_data which holds all datasources we need in the WITH
clause.
"""
self.check_is_ratio()
local_filter = sql.Filters([self.where, local_filter]).remove(global_filter)
child = self.children[0]
grandchildren = []
if isinstance(child, metrics.MetricList):
for m in child:
grandchildren += list(m.children)
else:
grandchildren = child.children
cond_cols = sql.Columns(self.extra_index)
groupby = sql.Columns(split_by).add(cond_cols).add(self.stratified_by)
util_metric = metrics.MetricList(grandchildren)
util_indexes = sql.Columns(indexes).add(self.stratified_by)
raw_table_sql, with_data = util_metric.get_sql_and_with_clause(
table, groupby, global_filter, util_indexes, local_filter, with_data)
raw_table = sql.Datasource(raw_table_sql, 'MHRaw')
raw_table_alias, _ = with_data.merge(raw_table)
base = self.baseline_key if isinstance(self.baseline_key,
tuple) else [self.baseline_key]
base_cond = ('%s = %s' % (c, _format_to_condition(b))
for c, b in zip(cond_cols.aliases, base))
base_cond = ' AND '.join(base_cond)
base_value = sql.Sql(
sql.Columns(raw_table_sql.groupby.aliases).add(
raw_table_sql.columns.aliases).difference(cond_cols.aliases),
raw_table_alias, base_cond)
base_table = sql.Datasource(base_value, 'MHBase')
base_table_alias, _ = with_data.merge(base_table)
exclude_base_condition = ('%s != %s' % (c, _format_to_condition(b))
for c, b in zip(cond_cols.aliases, base))
exclude_base_condition = ' OR '.join(exclude_base_condition)
cond = None if self.include_base else sql.Filters([exclude_base_condition])
col_tmpl = """100 * SAFE_DIVIDE(
COALESCE(SUM(SAFE_DIVIDE(
{raw}.%(numer)s * {base}.%(denom)s,
{base}.%(denom)s + {raw}.%(denom)s)), 0),
COALESCE(SUM(SAFE_DIVIDE(
{base}.%(numer)s * {raw}.%(denom)s,
{base}.%(denom)s + {raw}.%(denom)s)), 0)) - 100"""
col_tmpl = col_tmpl.format(raw=raw_table_alias, base=base_table_alias)
columns = sql.Columns()
alias_tmpl = self.name_tmpl
# The columns might get consolidated and have different aliases. We need to
# find them by reconstruction. Reusing the with_data in reconstruction will
# make sure the columns get renamed the same way as in raw_table_sql.
if isinstance(child, metrics.MetricList):
for c in child:
with_data2 = copy.deepcopy(with_data)
numer_sql, with_data2 = c.children[0].get_sql_and_with_clause(
table, groupby, global_filter, util_indexes, local_filter,
with_data2)
with_data2.merge(sql.Datasource(numer_sql))
numer = numer_sql.columns[-1].alias
with_data2 = copy.deepcopy(with_data)
denom_sql, with_data2 = c.children[1].get_sql_and_with_clause(
table, groupby, global_filter, util_indexes, local_filter,
with_data2)
with_data2.merge(sql.Datasource(denom_sql))
denom = denom_sql.columns[-1].alias
columns.add(
sql.Column(
col_tmpl % {
'numer': numer,
'denom': denom
},
alias=alias_tmpl.format(c.name)))
else:
with_data2 = copy.deepcopy(with_data)
numer_sql, with_data2 = child.children[0].get_sql_and_with_clause(
table, groupby, global_filter, util_indexes, local_filter, with_data2)
with_data2.merge(sql.Datasource(numer_sql))
numer = numer_sql.columns[-1].alias
with_data2 = copy.deepcopy(with_data)
denom_sql, with_data2 = child.children[1].get_sql_and_with_clause(
table, groupby, global_filter, util_indexes, local_filter, with_data2)
with_data2.merge(sql.Datasource(denom_sql))
denom = denom_sql.columns[-1].alias
columns = sql.Column(
col_tmpl % {
'numer': numer,
'denom': denom,
},
alias=alias_tmpl.format(child.name))
using = indexes.difference(cond_cols).add(self.stratified_by)
return sql.Sql(columns,
sql.Join(raw_table_alias, base_table_alias, using=using),
cond, indexes.aliases), with_data
def get_display_fn(name,
split_by=None,
melted=False,
value='Value',
condition_column: Optional[List[Text]] = None,
ctrl_id=None,
default_metric_formats=None):
"""Returns a function that displays confidence interval nicely.
Args:
name: 'Jackknife' or 'Bootstrap'.
split_by: The split_by passed to Jackknife().compute_on().
melted: Whether the input res is in long format.
value: The name of the value column.
condition_column: Present if the child is PercentChange or AbsoluteChange.
ctrl_id: Present if the child is PercentChange or AbsoluteChange. It's the
baseline_key of the comparison.
default_metric_formats: How to format the numbers in the display.
Returns:
A function that takes a DataFrame and displays confidence intervals.
"""
def display(res,
aggregate_dimensions=True,
show_control=None,
metric_formats=None,
sort_by=None,
metric_order=None,
flip_color=None,
hide_null_ctrl=True,
display_expr_info=False,
auto_add_description=False,
return_pre_agg_df=False,
return_formatted_df=False):
"""Displays confidence interval nicely in Colab/Jupyter notebook.
Args:
res: The DataFrame returned by Jackknife or Bootstrap with confidence
level specified.
aggregate_dimensions: Whether to aggregate all dimensions into one
column.
show_control: If False, only ratio values in non-control rows are shown.
metric_formats: A dict specifying how to display metric values. Keys can
be 'Value' and 'Ratio'. Values can be 'absolute', 'percent', 'pp' or a
formatting string. For example, '{:.2%}' would have the same effect as
'percent'. By default, Value is in absolute form and Ratio in percent.
sort_by: In the form of
[{'column': ('CI_Lower', 'Metric Foo'), 'ascending': False},
{'column': 'Dim Bar', 'order': ['Logged-in', 'Logged-out']}]. 'column'
is the column to sort by. If you want to sort by a metric, use
(field, metric name) where field could be 'Ratio', 'Value',
'CI_Lower' and 'CI_Upper'. 'order' is optional and for categorical
column. 'ascending' is optional and default True. The result will be
displayed in the order specified by sort_by from top to bottom.
metric_order: An iterable. The metric will be displayed by the order from
left to right.
flip_color: An iterable of metric names for which positive changes will be
displayed in red and negative changes in green.
hide_null_ctrl: Whether to hide the control value, or use '-' to represent
it, when it is null.
display_expr_info: Whether to display 'Control_id', 'Is_Control' and
'Description' columns. Only has effect when aggregate_dimensions is
False.
auto_add_description: Whether to add Control/Not Control as descriptions.
return_pre_agg_df: Whether to return the pre-aggregated df.
return_formatted_df: Whether to return the raw HTML df to be rendered.
Returns:
Displays confidence interval nicely for df, or aggregated/formatted if
return_pre_agg_df/return_formatted_df is True.
"""
base = res.meterstick_change_base
if not melted:
res = utils.melt(res)
if base is not None:
# base always has the baseline so it needs to be on the left.
res = base.join(res)
comparison_suffix = [
AbsoluteChange('', '').name_tmpl.format(''),
PercentChange('', '').name_tmpl.format('')
]
comparison_suffix = '(%s)$' % '|'.join(comparison_suffix)
# Don't use inplace=True. It will change the index of 'base' too.
res.index = res.index.set_levels(
res.index.levels[0].str.replace(comparison_suffix, ''), 0)
show_control = True if show_control is None else show_control
metric_order = list(res.index.get_level_values(
0).unique()) if metric_order is None else metric_order
res = res.reset_index()
control = ctrl_id
condition_col = condition_column
if condition_column:
if len(condition_column) == 1:
condition_col = condition_column[0]
else:
res['_expr_id'] = res[condition_column].agg(', '.join, axis=1)
control = ', '.join(ctrl_id)
condition_col = '_expr_id'
metric_formats = (
default_metric_formats if metric_formats is None else metric_formats)
formatted_df = confidence_interval_display.get_formatted_df(
res,
split_by,
aggregate_dimensions,
show_control,
metric_formats,
ratio=value,
value='_base_value',
ci_upper=name + ' CI-upper',
ci_lower=name + ' CI-lower',
expr_id=condition_col,
ctrl_id=control,
sort_by=sort_by,
metric_order=metric_order,
flip_color=flip_color,
hide_null_ctrl=hide_null_ctrl,
display_expr_info=display_expr_info,
auto_add_description=auto_add_description,
return_pre_agg_df=return_pre_agg_df)
if return_pre_agg_df or return_formatted_df:
return formatted_df
display_formatted_df = confidence_interval_display.display_formatted_df
return display_formatted_df(formatted_df)
return display
class MetricWithCI(Operation):
"""Base class for Metrics that have confidence interval info in the return.
The return, when melted, has columns like
Value Jackknife SE
or if confidence specified,
Value Jackknife CI-lower Jackknife CI-upper
if not melted, the columns are pd.MultiIndex like
Metric1 Metric2...
Value Jackknife SE (or CI-lower and CI-upper) Value Jackknife SE
The column for the point estimate is usually "Value", but could be something
else like "Percent Change" for comparison Metrics, so don't rely on the name.
Whatever it's called, it's always the first column, followed by
"... SE" or "... CI-lower" and "... CI-upper".
If confidence is specified, a display function will be bound to the returned
DataFrame so res.display() will display confidence interval and highlight
significant changes nicely in Colab and Jupyter notebook.
As the return has multiple columns even for one Metric, the default DataFrame
returned is in melted format, unlike vanilla Metric.
The main computation pipeline is used to compute stderr or confidence interval
bounds. Then we compute the point estimates and combine them with the stderr or CI.
Similar to how you derive Metric, if you don't need vectorization, overwrite
compute(). If you need vectorization, overwrite compute_slices(), or even
simpler, get_samples(). See Jackknife and Bootstrap for examples.
Attributes:
unit: The column to go over (jackknife/bootstrap over) to get stderr.
confidence: The level of the confidence interval, must be in (0, 1). If
specified, we return confidence interval range instead of standard error.
Additionally, a display() function will be bound to the result so you can
visualize the confidence interval nicely in Colab and Jupyter notebook.
prefix: In the result, the column names will be like "{prefix} SE",
"{prefix} CI-upper".
sql_batch_size: The number of resamples to compute in one SQL run. It only
has effect in the 'mixed' mode of compute_on_sql(). Note that you can also
specify batch_size in compute_on_sql() directly, which takes precedence over this one.
And all other attributes inherited from Operation.
"""
def __init__(self,
unit: Optional[Text],
child: Optional[metrics.Metric] = None,
confidence: Optional[float] = None,
name_tmpl: Optional[Text] = None,
prefix: Optional[Text] = None,
sql_batch_size=None,
**kwargs):
if confidence and not 0 < confidence < 1:
raise ValueError('Confidence must be in (0, 1).')
self.unit = unit
self.confidence = confidence
super(MetricWithCI, self).__init__(child, name_tmpl, **kwargs)
self.apply_name_tmpl = False
self.prefix = prefix
self.sql_batch_size = sql_batch_size
if not self.prefix and self.name_tmpl:
self.prefix = prefix or self.name_tmpl.format('').strip()
def compute_on_samples(self,
keyed_samples: Iterable[Tuple[Any, pd.DataFrame]],
split_by=None):
"""Iters through sample DataFrames and collects results.
Args:
keyed_samples: A tuple. The first element is the cache_key and the second
is the corresponding DataFrame. Remember a key should correspond to the
same data.
split_by: Something that can be passed into DataFrame.groupby().
Returns:
List of results from samples.
"""
estimates = []
for keyed_sample in keyed_samples:
try:
cache_key, sample = keyed_sample
res = self.compute_child(
sample, split_by, melted=True, cache_key=cache_key)
estimates.append(res)
except Exception as e: # pylint: disable=broad-except
print(
'Warning: Failed on %s sample data for reason %s. If you see many '
'such failures, your data might be too sparse.' %
(self.name_tmpl.format(''), repr(e)))
finally:
# Jackknife keys are unique so can be kept longer.
if isinstance(self, Bootstrap) and cache_key is not None:
cache_key = self.wrap_cache_key(cache_key, split_by)
# In case errors occur so the top Metric was not computed, we don't
# want to prune because the leaf Metrics still need to be cleaned up.
self.flush_children(cache_key, split_by, prune=False)
# There are functions outside meterstick that call this directly, so don't change it.
return estimates
def compute_children(self,
df: pd.DataFrame,
split_by=None,
melted=False,
return_dataframe=True,
cache_key=None):
del melted, return_dataframe, cache_key # unused
return self.compute_on_samples(self.get_samples(df, split_by), split_by)
def compute_on_children(self, children, split_by):
del split_by # unused
bucket_estimates = pd.concat(children, axis=1, sort=False)
return self.get_stderrs_or_ci_half_width(bucket_estimates)
def manipulate(self,
res,
melted=False,
return_dataframe=True,
apply_name_tmpl=False):
# Always return a melted df and don't add suffix like "Jackknife" because
# point_est won't have it.
del melted, return_dataframe # unused
base = res.meterstick_change_base
res = super(MetricWithCI, self).manipulate(res, True, True, apply_name_tmpl)
return self._add_base_to_res(res, base)
def compute_slices(self, df, split_by):
std = super(MetricWithCI, self).compute_slices(df, split_by)
point_est = self.compute_child(df, split_by, melted=True)
res = point_est.join(utils.melt(std))
if self.confidence:
res[self.prefix +
' CI-lower'] = res.iloc[:, 0] - res[self.prefix + ' CI-lower']
res[self.prefix + ' CI-upper'] += res.iloc[:, 0]
res = utils.unmelt(res)
base = self._compute_change_base(df, split_by)
return self._add_base_to_res(res, base)
def _compute_change_base(self, df, split_by, execute=None, mode=None):
"""Computes the base values for Change. It's used in res.display()."""
if not self.confidence:
return None
if len(self.children) != 1 or not isinstance(
self.children[0], (PercentChange, AbsoluteChange)):
return None
base = None
change = self.children[0]
to_split = (
split_by + change.extra_index if split_by else change.extra_index)
if execute is None:
base = change.compute_child(df, to_split)
else:
base = change.children[0].compute_on_sql(df, to_split, execute, mode=mode)
base.columns = [change.name_tmpl.format(c) for c in base.columns]
base = utils.melt(base)
base.columns = ['_base_value']
return base
@staticmethod
def _add_base_to_res(res, base):
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=UserWarning)
res.meterstick_change_base = base
return res
def final_compute(self,
res,
melted: bool = False,
return_dataframe: bool = True,
split_by: Optional[List[Text]] = None,
df=None):
"""Add a display function if confidence is specified."""
del return_dataframe # unused
base = res.meterstick_change_base
if not melted:
res = utils.unmelt(res)
self._add_base_to_res(res, base)
if self.confidence:
extra_idx = list(metrics.get_extra_idx(self))
indexes = split_by + extra_idx if split_by else extra_idx
if len(self.children) == 1 and isinstance(
self.children[0], (PercentChange, AbsoluteChange)):
change = self.children[0]
indexes = [i for i in indexes if i not in change.extra_index]
res = self.add_display_fn(res, indexes, melted)
return res
def add_display_fn(self, res, split_by, melted):
"""Bounds a display function to res so res.display() works."""
value = res.columns[0] if melted else res.columns[0][1]
ctrl_id = None
condition_col = None
metric_formats = None
if len(self.children) == 1 and isinstance(self.children[0],
(PercentChange, AbsoluteChange)):
change = self.children[0]
ctrl_id = change.baseline_key
condition_col = change.extra_index
if isinstance(self.children[0], PercentChange):
metric_formats = {'Ratio': 'percent'}
fn = get_display_fn(self.prefix, split_by, melted, value, condition_col,
ctrl_id, metric_formats)
# pylint: disable=no-value-for-parameter
res.display = fn.__get__(res) # pytype: disable=attribute-error
# pylint: enable=no-value-for-parameter
return res
@staticmethod
def get_stderrs(bucket_estimates):
dof = bucket_estimates.count(axis=1) - 1
return bucket_estimates.std(1), dof
def get_ci_width(self, stderrs, dof):
"""You can return asymmetrical confidence interval."""
half_width = stderrs * stats.t.ppf((1 + self.confidence) / 2, dof)
return half_width, half_width
def get_stderrs_or_ci_half_width(self, bucket_estimates):
"""Returns confidence interval infomation in an unmelted DataFrame."""
stderrs, dof = self.get_stderrs(bucket_estimates)
if self.confidence:
res = pd.DataFrame(self.get_ci_width(stderrs, dof)).T
res.columns = [self.prefix + ' CI-lower', self.prefix + ' CI-upper']
else:
res = pd.DataFrame(stderrs, columns=[self.prefix + ' SE'])
res = utils.unmelt(res)
return res
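  # A hedged numeric sketch of the CI math above (values illustrative): with
  # confidence = 0.9, a stderr of 2 and dof = 19,
  #   half_width = 2 * stats.t.ppf(0.95, 19) ~= 2 * 1.729 ~= 3.46
  # so the '<prefix> CI-lower' / '<prefix> CI-upper' columns hold half-widths
  # here, which compute_slices() later converts to point_est -/+ half_width.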
def get_samples(self, df, split_by=None):
raise NotImplementedError
def get_sql_and_with_clause(self, table, split_by, global_filter, indexes,
local_filter, with_data):
"""Gets the SQL for Jackknife or Bootstrap.
The query is constructed by
1. Resample the table.
2. Compute the child Metric on the resampled data.
3. Compute the standard error from #2.
4. Compute the point estimate from original table.
5. sql.Join #3 and #4.
6. If metric has confidence level specified, we also get the degrees of
freedom so we can later compute the critical value of t distribution in
Python.
7. If metric only has one child and it's PercentChange or AbsoluteChange, we
also get the base values for comparison. They will be used in the
res.display().
Args:
table: The table we want to query from.
split_by: The columns that we use to split the data.
global_filter: The sql.Filters that can be applied to the whole Metric
tree.
indexes: The columns that we shouldn't apply any arithmetic operation.
local_filter: The sql.Filters that have been accumulated so far.
with_data: A global variable that contains all the WITH clauses we need.
Returns:
The SQL instance for metric, without the WITH clause component.
The global with_data which holds all datasources we need in the WITH
clause.
"""
if not isinstance(self, (Jackknife, Bootstrap)):
raise ValueError('Not a Jackknife or Bootstrap!')
local_filter = sql.Filters([self.where, local_filter]).remove(global_filter)
name = 'Jackknife' if isinstance(self, Jackknife) else 'Bootstrap'
se, with_data = get_se(self, table, split_by, global_filter, indexes,
local_filter, with_data)
se_alias, se_rename = with_data.merge(sql.Datasource(se, name + 'SE'))
pt_est, with_data = self.children[0].get_sql_and_with_clause(
table, split_by, global_filter, indexes, local_filter, with_data)
pt_est_alias, pt_est_rename = with_data.merge(
sql.Datasource(pt_est, name + 'PointEstimate'))
columns = sql.Columns()
using = sql.Columns(se.groupby)
pt_est_col = []
for c in pt_est.columns:
if c in indexes.aliases:
using.add(c)
else:
pt_est_col.append(
sql.Column(
'%s.%s' % (pt_est_alias, pt_est_rename.get(c.alias, c.alias)),
alias=c.alias_raw))
se_cols = []
for c in se.columns:
if c not in indexes.aliases:
se_cols.append(
sql.Column(
'%s.%s' % (se_alias, se_rename.get(c.alias, c.alias)),
alias=c.alias_raw))
if self.confidence:
dof_cols = se_cols[1::2]
se_cols = se_cols[::2]
cols = zip(pt_est_col, se_cols, dof_cols)
else:
cols = zip(pt_est_col, se_cols)
columns.add(cols)
has_base_vals = False
if self.confidence:
child = self.children[0]
if len(self.children) == 1 and isinstance(
child, (PercentChange, AbsoluteChange)):
has_base_vals = True
base, with_data = child.children[0].get_sql_and_with_clause(
table,
sql.Columns(split_by).add(child.extra_index), global_filter,
indexes, local_filter, with_data)
base_alias, base_rename = with_data.merge(
sql.Datasource(base, '_ShouldAlreadyExists'))
columns.add(
sql.Column(
'%s.%s' % (base_alias, base_rename.get(c.alias, c.alias)),
alias=c.alias_raw) for c in base.columns.difference(indexes))
join = 'LEFT' if using else 'CROSS'
from_data = sql.Join(
pt_est_alias, se_alias, join=join, using=using)
if has_base_vals:
from_data = from_data.join(base_alias, join=join, using=using)
return sql.Sql(using.add(columns), from_data), with_data
def compute_on_sql(
self,
table,
split_by=None,
execute=None,
melted=False,
mode=None,
batch_size=None,
):
"""Computes self in pure SQL or a mixed of SQL and Python.
Args:
table: The table we want to query from.
split_by: The columns that we use to split the data.
execute: A function that executes a SQL query and returns a DataFrame.
melted: Whether to transform the result to long format.
mode: For Operations, there are two ways to compute the result in SQL: one
is computing everything in SQL, the other is computing the children
in SQL then the rest in Python. We call them 'sql' and 'mixed' modes. If
self has grandchildren, then we can compute the children in two modes
too. We can call them light `mixed` mode and recursive `mixed` mode.
If `mode` is 'sql', it computes everything in SQL.
If `mode` is 'mixed', it computes everything recursively in the `mixed`
mode.
We recommend leaving `mode` as None. This mode tries the `sql` mode first
and, if not implemented, switches to the light `mixed` mode. The logic is
applied recursively from the root to leaf Metrics, so a Metric tree could
have its top 3 layers computed in Python and the bottom in SQL. In summary,
everything that can be computed in SQL is computed in SQL.
batch_size: The number of resamples to compute in one SQL run. It only has
effect in the 'mixed' mode. It takes precedence over self.sql_batch_size.
Returns:
A pandas DataFrame. It's the computation of self in SQL.
"""
self._runtime_batch_size = batch_size
try:
return super(MetricWithCI, self).compute_on_sql(table, split_by, execute,
melted, mode)
finally:
self._runtime_batch_size = None
def compute_through_sql(self, table, split_by, execute, mode):
if mode not in (None, 'sql', 'mixed', 'magic'):
raise ValueError('Mode %s is not supported!' % mode)
if mode in (None, 'sql'):
if self.all_computable_in_pure_sql(False):
try:
return self.compute_on_sql_sql_mode(table, split_by, execute)
except Exception as e: # pylint: disable=broad-except
raise utils.MaybeBadSqlModeError(use_batch_size=True) from e
elif mode == 'sql':
raise ValueError('%s is not computable in pure SQL.' % self.name)
if self.where:
table = sql.Sql(sql.Column('*', auto_alias=False), table, self.where)
return self.compute_on_sql_mixed_mode(table, split_by, execute, mode)
def compute_on_sql_sql_mode(self, table, split_by, execute):
"""Computes self in a SQL query and process the result."""
res = super(MetricWithCI,
self).compute_on_sql_sql_mode(table, split_by, execute)
sub_dfs = []
base = None
if self.confidence:
if len(self.children) == 1 and isinstance(
self.children[0], (PercentChange, AbsoluteChange)):
# The first 3n columns are Value, SE, dof for n Metrics. The
# last n columns are the base values of Change.
if len(res.columns) % 4:
raise ValueError('Wrong shape for a MetricWithCI with confidence!')
n_metrics = len(res.columns) // 4
base = res.iloc[:, -n_metrics:]
res = res.iloc[:, :3 * n_metrics]
change = self.children[0]
base.columns = [change.name_tmpl.format(c) for c in base.columns]
base = utils.melt(base)
base.columns = ['_base_value']
if len(res.columns) % 3:
raise ValueError('Wrong shape for a MetricWithCI with confidence!')
# The columns are like metric1, metric1 jackknife SE, metric1 dof, ...
metric_names = res.columns[::3]
sub_dfs = []
ci_lower = self.prefix + ' CI-lower'
ci_upper = self.prefix + ' CI-upper'
for i in range(0, len(res.columns), 3):
pt_est = res.iloc[:, i]
half_width = self.get_ci_width(res.iloc[:, i + 1], res.iloc[:, i + 2])
sub_df = pd.DataFrame(
{
'Value': res.iloc[:, i],
ci_lower: pt_est - half_width[0],
ci_upper: pt_est + half_width[1]
},
columns=['Value', ci_lower, ci_upper])
sub_dfs.append(sub_df)
else:
if len(res.columns) % 2:
raise ValueError('Wrong shape for a MetricWithCI!')
# The columns are like metric1, metric1 jackknife SE, ...
metric_names = res.columns[::2]
for i in range(0, len(res.columns), 2):
sub_df = res.iloc[:, [i, i + 1]]
sub_df.columns = ['Value', self.prefix + ' SE']
sub_dfs.append(sub_df)
res = pd.concat((sub_dfs), 1, keys=metric_names, names=['Metric'])
return self._add_base_to_res(res, base)
def compute_on_sql_mixed_mode(self, table, split_by, execute, mode=None):
batch_size = self._runtime_batch_size or self.sql_batch_size
try:
replicates = self.compute_children_sql(table, split_by, execute, mode,
batch_size)
except utils.MaybeBadSqlModeError:
raise
except Exception as e: # pylint: disable=broad-except
raise utils.MaybeBadSqlModeError(batch_size=batch_size) from e
std = self.compute_on_children(replicates, split_by)
point_est = self.children[0].compute_on_sql(table, split_by, execute, True,
mode)
res = point_est.join(utils.melt(std))
if self.confidence:
res[self.prefix +
' CI-lower'] = res.iloc[:, 0] - res[self.prefix + ' CI-lower']
res[self.prefix + ' CI-upper'] += res.iloc[:, 0]
res = utils.unmelt(res)
base = self._compute_change_base(table, split_by, execute, mode)
return self._add_base_to_res(res, base)
def compute_children_sql(self,
table,
split_by,
execute,
mode=None,
batch_size=None):
"""The return should be similar to compute_children()."""
raise NotImplementedError
def get_sum_ct_monkey_patch_fn(unit, original_split_by, original_compute):
"""Gets a function that can be monkey patched to Sum/Count.compute_slices.
Args:
unit: The column whose levels define the jackknife buckets.
original_split_by: The split_by passed to Jackknife().compute_on().
original_compute: The compute_slices() of Sum or Count. We will monkey patch
it.
Returns:
A function that can be monkey patched to Sum/Count.compute_slices().
"""
def precompute_loo(self, df, split_by=None):
"""Precomputes leave-one-out (LOO) results to make Jackknife faster.
For Sum, Count, Dot and Mean, it's possible to compute the LOO estimates in
a vectorized way. For Sum and Count, we can get the LOO estimates by
subtracting the sum/count of each bucket from the total. Here we precompute
and cache the LOO results.
Args:
self: The Sum or Count instance calling this function.
df: The DataFrame passed to Sum/Count.compute_slices().
split_by: The split_by passed to Sum/Count.compute_slices().
Returns:
Same as what normal Sum/Count.compute_slices() would have returned.
"""
total = original_compute(self, df, split_by)
if isinstance(self, metrics.Count) and self.distinct:
# For Count distinct, we cannot cut the corner.
return total
split_by_with_unit = [unit] + split_by if split_by else [unit]
each_bucket = original_compute(self, df, split_by_with_unit)
each_bucket = utils.adjust_slices_for_loo(each_bucket, original_split_by)
loo = total - each_bucket
if split_by:
# total - each_bucket might put the unit as the innermost level, but we
# want the unit as the outermost level.
loo = loo.reorder_levels(split_by_with_unit)
key = utils.CacheKey(('_RESERVED', 'Jackknife', unit), self.cache_key.where,
[unit] + split_by)
self.save_to_cache(key, loo)
self.tmp_cache_keys.add(key)
buckets = loo.index.get_level_values(0).unique() if split_by else loo.index
for bucket in buckets:
key = self.wrap_cache_key(('_RESERVED', 'Jackknife', unit, bucket),
split_by, self.cache_key.where)
self.save_to_cache(key, loo.loc[bucket])
self.tmp_cache_keys.add(key)
return total
return precompute_loo
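# A minimal standalone sketch of the leave-one-out (LOO) trick used above,
# on a toy DataFrame (names and values are illustrative, not from the source):
#
#   df = pd.DataFrame({'unit': [1, 1, 2, 3], 'X': [1, 2, 3, 4]})
#   total = df['X'].sum()                        # 10
#   each_bucket = df.groupby('unit')['X'].sum()  # unit 1: 3, unit 2: 3, unit 3: 4
#   loo = total - each_bucket                    # 7, 7, 6: sums with unit i left out
#
# Caching loo under the ('_RESERVED', 'Jackknife', unit, bucket) keys lets the
# jackknife skip recomputing the child Metric once per bucket.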
def get_mean_monkey_patch_fn(unit, original_split_by):
"""Gets a function that can be monkey patched to Mean.compute_slices.
Args:
unit: The column whose levels define the jackknife buckets.
original_split_by: The split_by passed to Jackknife().compute_on().
Returns:
A function that can be monkey patched to Mean.compute_slices().
"""
def precompute_loo(self, df, split_by=None):
"""Precomputes leave-one-out (LOO) results to make Jackknife faster.
For Sum, Count, Dot and Mean, it's possible to compute the LOO estimates in
a vectorized way. LOO mean is just LOO sum / LOO count. Here we precompute
and cache the LOO results.
Args:
self: The Mean instance calling this function.
df: The DataFrame passed to Mean.compute_slices().
split_by: The split_by passed to Mean.compute_slices().
Returns:
Same as what normal Mean.compute_slices() would have returned.
"""
data = df.copy()
split_by_with_unit = [unit] + split_by if split_by else [unit]
if self.weight:
weighted_var = '_weighted_%s' % self.var
data[weighted_var] = data[self.var] * data[self.weight]
total_sum = self.group(data, split_by)[weighted_var].sum()
total_weight = self.group(data, split_by)[self.weight].sum()
bucket_sum = self.group(data, split_by_with_unit)[weighted_var].sum()
bucket_sum = utils.adjust_slices_for_loo(bucket_sum, original_split_by)
bucket_weight = self.group(data, split_by_with_unit)[self.weight].sum()
bucket_weight = utils.adjust_slices_for_loo(bucket_weight,
original_split_by)
loo_sum = total_sum - bucket_sum
loo_weight = total_weight - bucket_weight
if split_by:
# total - bucket_sum might put the unit as the innermost level, but we
# want the unit as the outermost level.
loo_sum = loo_sum.reorder_levels(split_by_with_unit)
loo_weight = loo_weight.reorder_levels(split_by_with_unit)
loo = loo_sum / loo_weight
mean = total_sum / total_weight
else:
total_sum = self.group(data, split_by)[self.var].sum()
bucket_sum = self.group(data, split_by_with_unit)[self.var].sum()
bucket_sum = utils.adjust_slices_for_loo(bucket_sum, original_split_by)
total_ct = self.group(data, split_by)[self.var].count()
bucket_ct = self.group(data, split_by_with_unit)[self.var].count()
bucket_ct = utils.adjust_slices_for_loo(bucket_ct, original_split_by)
loo_sum = total_sum - bucket_sum
loo_ct = total_ct - bucket_ct
loo = loo_sum / loo_ct
mean = total_sum / total_ct
if split_by:
loo = loo.reorder_levels(split_by_with_unit)
buckets = loo.index.get_level_values(0).unique() if split_by else loo.index
key = utils.CacheKey(('_RESERVED', 'Jackknife', unit), self.cache_key.where,
[unit] + split_by)
self.save_to_cache(key, loo)
self.tmp_cache_keys.add(key)
for bucket in buckets:
key = utils.CacheKey(('_RESERVED', 'Jackknife', unit, bucket),
self.cache_key.where, split_by)
self.save_to_cache(key, loo.loc[bucket])
self.tmp_cache_keys.add(key)
return mean
return precompute_loo
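# A hedged sketch of the LOO mean above (continuing the toy numbers from the
# sketch after get_sum_ct_monkey_patch_fn): with total_sum = 10, total_ct = 4,
# and bucket 1 having bucket_sum = 3 and bucket_ct = 2, the leave-one-out mean
# is (10 - 3) / (4 - 2) = 3.5. The weighted branch is identical except it uses
# sum(X * weight) / sum(weight) in place of sum / count.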
def get_dot_monkey_patch_fn(unit, original_split_by, original_compute):
"""Gets a function that can be monkey patched to Dot.compute_slices.
Args:
unit: The column whose levels define the jackknife buckets.
original_split_by: The split_by passed to Jackknife().compute_on().
original_compute: The compute_slices() of Dot. We will monkey patch it.
Returns:
A function that can be monkey patched to Dot.compute_slices().
"""
def precompute_loo(self, df, split_by=None):
"""Precomputes leave-one-out (LOO) results to make Jackknife faster.
For Sum, Count, Dot and Mean, it's possible to compute the LOO estimates in
a vectorized way. Dot is the Sum of the product of two columns, so similarly
to Sum, we can get the LOO estimates by subtracting the sum of each bucket
from the total. Here we precompute and cache the LOO results.
Args:
self: The Dot instance calling this function.
df: The DataFrame passed to Dot.compute_slices().
split_by: The split_by passed to Dot.compute_slices().
Returns:
Same as what normal Dot.compute_slices() would have returned.
"""
data = df.copy()
split_by_with_unit = [unit] + split_by if split_by else [unit]
if not self.normalize:
total = original_compute(self, df, split_by)
each_bucket = original_compute(self, df, split_by_with_unit)
each_bucket = utils.adjust_slices_for_loo(each_bucket, original_split_by)
loo = total - each_bucket
if split_by:
# total - each_bucket might put the unit as the innermost level, but we
# want the unit as the outermost level.
loo = loo.reorder_levels(split_by_with_unit)
else:
prod = '_meterstick_dot_prod'
data[prod] = data[self.var] * data[self.var2]
total_sum = self.group(data, split_by)[prod].sum()
bucket_sum = self.group(data, split_by_with_unit)[prod].sum()
bucket_sum = utils.adjust_slices_for_loo(bucket_sum, original_split_by)
total_ct = self.group(data, split_by)[prod].count()
bucket_ct = self.group(data, split_by_with_unit)[prod].count()
bucket_ct = utils.adjust_slices_for_loo(bucket_ct, original_split_by)
loo_sum = total_sum - bucket_sum
loo_ct = total_ct - bucket_ct
loo = loo_sum / loo_ct
total = total_sum / total_ct
if split_by:
loo = loo.reorder_levels(split_by_with_unit)
buckets = loo.index.get_level_values(0).unique() if split_by else loo.index
key = utils.CacheKey(('_RESERVED', 'Jackknife', unit), self.cache_key.where,
[unit] + split_by)
self.save_to_cache(key, loo)
self.tmp_cache_keys.add(key)
for bucket in buckets:
key = utils.CacheKey(('_RESERVED', 'Jackknife', unit, bucket),
self.cache_key.where, split_by)
self.save_to_cache(key, loo.loc[bucket])
self.tmp_cache_keys.add(key)
return total
return precompute_loo
def save_to_cache_for_jackknife(self, key, val, split_by=None):
"""Used to monkey patch the save_to_cache() during Jackknife.precompute().
What cache_key to use for the point estimate of Jackknife is tricky because we
want to support two use cases at the same time.
1. We want sumx to be computed only once in
MetricList([Jackknife(sumx), sumx]).compute_on(df, return_dataframe=False),
so the key for point estimate should be the same sumx uses.
2. But then it will fail when multiple Jackknifes are involved. For example,
(Jackknife(unit1, sumx) - Jackknife(unit2, sumx)).compute_on(df)
will fail because two Jackknifes share point estimate but not LOO estimates.
When the 2nd Jackknife precomputes its point estimate, as it uses the same key
as the 1st one, it will mistakenly assume LOO has been cached, which
unfortunately is not true.
The solution here is we use different keys for different Jackknifes, so LOO
will always be precomputed. Additionally we cache the point estimate again
with the key other Metrics like Sum would use so they can reuse it.
Args:
self: An instance of metrics.Metric.
key: The cache key currently being used in computation.
val: The value to cache.
split_by: Something that can be passed into df.groupby().
"""
key = self.wrap_cache_key(key, split_by)
if isinstance(key.key, tuple) and key.key[:2] == ('_RESERVED', 'jk'):
val = val.copy() if isinstance(val, (pd.Series, pd.DataFrame)) else val
base_key = key.key[2]
base_key = utils.CacheKey(base_key, key.where, key.split_by, key.slice_val)
self.cache[base_key] = val
if utils.is_tmp_key(base_key):
self.tmp_cache_keys.add(base_key)
val = val.copy() if isinstance(val, (pd.Series, pd.DataFrame)) else val
self.cache[key] = val
class Jackknife(MetricWithCI):
"""Class for Jackknife estimates of standard errors.
Attributes:
unit: The column whose levels define the jackknife buckets.
confidence: The level of the confidence interval, must be in (0, 1). If
specified, we return confidence interval range instead of standard error.
Additionally, a display() function will be bound to the result so you can
visualize the confidence interval nicely in Colab and Jupyter notebook.
children: A tuple of a Metric whose result we jackknife on.
can_precompute: If all leaf Metrics are Sum, Count, Dot, and Mean, then we
can cut the corner to compute leave-one-out estimates.
And all other attributes inherited from Operation.
"""
def __init__(self,
unit: Text,
child: Optional[metrics.Metric] = None,
confidence: Optional[float] = None,
**kwargs):
super(Jackknife, self).__init__(unit, child, confidence, '{} Jackknife',
None, **kwargs)
self.can_precompute = self.can_be_precomputed()
if confidence:
self.computable_in_pure_sql = False
def __call__(self, child: metrics.Metric):
jk = super(Jackknife, self).__call__(child)
jk.can_precompute = jk.can_be_precomputed()
return jk
def precompute(self, df, split_by=None):
"""Caches point estimate and leave-one-out (LOO) results for Sum/Count/Mean.
For Sum, Count, Dot and Mean, it's possible to compute the LOO estimates in
a vectorized way. For Sum and Count, we can get the LOO estimates
by subtracting the sum/count of each bucket from the total. For Mean, LOO
mean is LOO sum / LOO count. So we can monkey patch the compute_slices() of
the Metrics to cache the LOO results under certain keys when we precompute
the point estimate.
Args:
df: The DataFrame passed from compute_on().
split_by: The split_by passed from compute_on().
Returns:
The input df. All we do here is saving precomputed stuff to cache.
"""
original_sum_compute_slices = metrics.Sum.compute_slices
original_ct_compute_slices = metrics.Count.compute_slices
original_mean_compute_slices = metrics.Mean.compute_slices
original_dot_compute_slices = metrics.Dot.compute_slices
original_save_to_cache = metrics.Metric.save_to_cache
try:
metrics.Sum.compute_slices = get_sum_ct_monkey_patch_fn(
self.unit, split_by, original_sum_compute_slices)
metrics.Count.compute_slices = get_sum_ct_monkey_patch_fn(
self.unit, split_by, original_ct_compute_slices)
metrics.Mean.compute_slices = get_mean_monkey_patch_fn(
self.unit, split_by)
metrics.Dot.compute_slices = get_dot_monkey_patch_fn(
self.unit, split_by, original_dot_compute_slices)
metrics.Metric.save_to_cache = save_to_cache_for_jackknife
cache_key = self.cache_key or self.RESERVED_KEY
cache_key = ('_RESERVED', 'jk', cache_key, self.unit)
self.compute_child(df, split_by, cache_key=cache_key)
finally:
metrics.Sum.compute_slices = original_sum_compute_slices
metrics.Count.compute_slices = original_ct_compute_slices
metrics.Mean.compute_slices = original_mean_compute_slices
metrics.Dot.compute_slices = original_dot_compute_slices
metrics.Metric.save_to_cache = original_save_to_cache
return df
def get_samples(self, df, split_by=None):
"""Yields leave-one-out (LOO) DataFrame with level value.
This step is the bottleneck of Jackknife so we have some tricks here.
1. If all leaf Metrics are Sum or Count, whose LOO results have already been
calculated, then we don't bother to get the right DataFrame. All we need is
the right cache_key to retrieve the results. This saves lots of time.
2. If split_by is specified, some slices may be missing buckets, so we only keep
the slices that appear in that bucket. In other words, if a slice doesn't
have bucket i, then the leave-i-out sample won't have the slice.
3. We yield the cache_key for bucket i together with the leave-i-out
DataFrame because we need the cache_key to retrieve results.
Args:
df: The DataFrame to compute on.
split_by: Anything that can be passed into df.groupby().
Yields:
('_RESERVED', 'Jackknife', unit, i) and the leave-i-out DataFrame.
"""
levels = df[self.unit].unique()
if len(levels) < 2:
raise ValueError('Too few %s to jackknife.' % self.unit)
if self.can_precompute:
for lvl in levels:
yield ('_RESERVED', 'Jackknife', self.unit, lvl), None
else:
if not split_by:
for lvl in levels:
yield ('_RESERVED', 'Jackknife', self.unit,
lvl), df[df[self.unit] != lvl]
else:
df = df.set_index(split_by)
max_slices = len(df.index.unique())
for lvl, idx in df.groupby(self.unit).groups.items():
df_rest = df[df[self.unit] != lvl]
unique_slice_val = idx.unique()
if len(unique_slice_val) != max_slices:
# Keep only the slices that appeared in the dropped bucket.
df_rest = df_rest[df_rest.index.isin(unique_slice_val)]
yield ('_RESERVED', 'Jackknife', self.unit,
lvl), df_rest.reset_index()
def compute_children(self,
df: pd.DataFrame,
split_by=None,
melted=False,
return_dataframe=True,
cache_key=None):
del melted, return_dataframe, cache_key # unused
replicates = None
if self.can_precompute:
try:
replicates = self.compute_child(
None, [self.unit] + split_by,
True,
cache_key=('_RESERVED', 'Jackknife', self.unit))
replicates = [replicates.unstack(self.unit)]
except Exception: # pylint: disable=broad-except
pass # Fall back to computing slice by slice to salvage good slices.
if replicates is None:
samples = self.get_samples(df, split_by)
replicates = self.compute_on_samples(samples, split_by)
return replicates
@staticmethod
def get_stderrs(bucket_estimates):
stderrs, dof = super(Jackknife, Jackknife).get_stderrs(bucket_estimates)
return stderrs * dof / np.sqrt(dof + 1), dof
def can_be_precomputed(self):
"""If all leaf Metrics are Sum/Count/Dot/Mean, LOO can be precomputed."""
for m in self.traverse():
if isinstance(m, Operation) and not m.precomputable_in_jk:
return False
if not m.children and not isinstance(
m, (metrics.Sum, metrics.Count, metrics.Mean, metrics.Dot)):
return False
if isinstance(m, metrics.Count) and m.distinct:
return False
return True
def compute_children_sql(self, table, split_by, execute, mode, batch_size):
"""Compute the children on leave-one-out data in SQL."""
batch_size = batch_size or 1
slice_and_units = sql.Sql(
sql.Columns(split_by + [self.unit], distinct=True), table, self.where)
slice_and_units = execute(str(slice_and_units))
if split_by:
slice_and_units.set_index(split_by, inplace=True)
replicates = []
unique_units = slice_and_units[self.unit].unique()
if batch_size == 1:
loo_sql = sql.Sql(sql.Column('*', auto_alias=False), table)
for unit in unique_units:
loo_where = '%s != "%s"' % (self.unit, unit)
if pd.api.types.is_numeric_dtype(slice_and_units[self.unit]):
loo_where = '%s != %s' % (self.unit, unit)
loo_sql.where = sql.Filters((self.where, loo_where))
loo = self.children[0].compute_on_sql(
loo_sql, split_by, execute, mode=mode)
# If a slice doesn't have the unit in the input data, we should exclude
# the slice in the loo.
if split_by:
loo = slice_and_units[slice_and_units[self.unit] == unit].join(loo)
loo.drop(self.unit, axis=1, inplace=True)
replicates.append(utils.melt(loo))
else:
if split_by:
slice_and_units.set_index(self.unit, append=True, inplace=True)
for i in range(int(np.ceil(len(unique_units) / batch_size))):
units = list(unique_units[i * batch_size:(i + 1) * batch_size])
loo = sql.Sql(
sql.Column('*', auto_alias=False),
sql.Datasource('UNNEST(%s)' % units, '_resample_idx').join(
table, on='_resample_idx != %s' % self.unit), self.where)
loo = self.children[0].compute_on_sql(loo, split_by + ['_resample_idx'],
execute, True, mode)
# If a slice doesn't have the unit in the input data, we should exclude
# the slice in the loo.
if split_by:
loo.index.set_names(self.unit, level='_resample_idx', inplace=True)
loo = slice_and_units.join(utils.unmelt(loo), how='inner')
loo = utils.melt(loo).unstack(self.unit)
else:
loo = loo.unstack('_resample_idx')
replicates.append(loo)
return replicates
class Bootstrap(MetricWithCI):
"""Class for Bootstrap estimates of standard errors.
Attributes:
unit: The column representing the blocks to be resampled in block bootstrap.
If specified we sample the unique blocks in the `unit` column, otherwise
we sample rows.
n_replicates: The number of bootstrap replicates. In "What Teachers Should
Know About the Bootstrap" Tim Hesterberg recommends 10000 for routine use
https://amstat.tandfonline.com/doi/full/10.1080/00031305.2015.1089789.
confidence: The level of the confidence interval, must be in (0, 1). If
specified, we return confidence interval range instead of standard error.
Additionally, a display() function will be bound to the result so you can
visualize the confidence interval nicely in Colab and Jupyter notebook.
children: A tuple of a Metric whose result we bootstrap on.
And all other attributes inherited from Operation.
"""
def __init__(self,
unit: Optional[Text] = None,
child: Optional[metrics.Metric] = None,
n_replicates: int = 10000,
confidence: Optional[float] = None,
**kwargs):
super(Bootstrap, self).__init__(unit, child, confidence, '{} Bootstrap',
None, **kwargs)
self.n_replicates = n_replicates
def get_samples(self, df, split_by=None):
split_by = [split_by] if isinstance(split_by, str) else split_by or []
if self.unit:
df = df.set_index(split_by + [self.unit])
for _ in range(self.n_replicates):
if self.unit is None:
yield ('_RESERVED', 'Bootstrap', None), self.group(df, split_by).sample(
frac=1, replace=True)
else:
if split_by:
resampled = []
for idx in df.groupby(split_by).groups.values():
resampled.append(idx.unique().to_series().sample(
frac=1, replace=True))
resampled = | pd.concat(resampled) | pandas.concat |
# fmt: off
import warnings
from collections.abc import Iterable
from copy import copy, deepcopy
import numpy as np
import pandas as pd
from plotly import express as px
from plotly import graph_objects as go
from plotly.subplots import make_subplots
from ross.bearing_seal_element import BearingElement, SealElement
from ross.disk_element import DiskElement
from ross.materials import steel
from ross.plotly_theme import tableau_colors
from ross.rotor_assembly import Rotor
from ross.shaft_element import ShaftElement
from ross.units import Q_
from scipy.interpolate import interp1d
from scipy.signal import argrelextrema
# fmt: on
# set Plotly palette of colors
colors1 = px.colors.qualitative.Dark24
__all__ = ["Report", "report_example"]
class Report:
"""Report according to standard analysis.
- Perform unbalance response
- Perform Stability_level1 analysis
- Apply Level 1 Screening Criteria
- Perform Stability_level2 analysis
Parameters
----------
rotor : object
A rotor built from ross.Rotor class.
config : object
An instance of class report.Config() with the analyses configurations.
Attributes
----------
rotor_type: str
Defines if the rotor is between bearings or overhung
disk_nodes: list
List of disk nodes between bearings or overhung (depending on the
rotor type)
Returns
-------
A Report object
Example
-------
>>> import ross as rs
>>> from ross.materials import steel
>>> import report as rp
>>> # Building the rotor model
>>> i_d = 0
>>> o_d = 0.05
>>> n = 6
>>> L = [0.25 for _ in range(n)]
# Shaft
>>> shaft_elem = [
... rs.ShaftElement(
... l,
... i_d,
... o_d,
... material=steel,
... shear_effects=True,
... rotary_inertia=True,
... gyroscopic=True,
... )
... for l in L
... ]
# Disks
>>> disk0 = rs.DiskElement.from_geometry(
... n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28
... )
>>> disk1 = rs.DiskElement.from_geometry(
... n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28
... )
# Bearings
>>> stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]
>>> stfy = [0.8e7, 0.9e7, 1.0e7, 1.1e7]
>>> freq = [400, 800, 1200, 1600]
>>> bearing0 = rs.BearingElement(0, kxx=stfx, kyy=stfy, cxx=2e3, frequency=freq)
>>> bearing1 = rs.BearingElement(6, kxx=stfx, kyy=stfy, cxx=2e3, frequency=freq)
>>> oper_clerance_brg = [bearing0, bearing1]
# Rotor
>>> rotor = Rotor(shaft_elem, [disk0, disk1], [bearing0, bearing1])
# coefficients for minimum clearance bearings
>>> stfx = [0.7e7, 0.8e7, 0.9e7, 1.0e7]
>>> dampx = [2.0e3, 1.9e3, 1.8e3, 1.7e3]
>>> freq = [400, 800, 1200, 1600]
>>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=dampx, frequency=freq)
>>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=dampx, frequency=freq)
>>> min_clearance_brg = [bearing0, bearing1]
# coefficients for maximum clearance bearings
>>> stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]
>>> dampx = [2.8e3, 2.7e3, 2.6e3, 2.5e3]
>>> freq = [400, 800, 1200, 1600]
>>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=dampx, frequency=freq)
>>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=dampx, frequency=freq)
>>> max_clearance_brg = [bearing0, bearing1]
# Analyses setup
>>> config = rp.Config()
>>> config.update_config(
... rotor_properties={
... "rotor_speeds": {
... "min_speed": 400,
... "max_speed": 1000,
... "oper_speed": 800,
... "trip_speed": 1200,
... "unit": "rad/s",
... }
... },
... bearings={
... "oper_clearance": oper_clerance_brg,
... "min_clearance": min_clearance_brg,
... "max_clearance": max_clearance_brg,
... },
... run_campbell={"speed_range": np.linspace(0, 1500, 51)},
... run_unbalance_response={
... "probes": {
... "node": [1, 4],
... "orientation": [np.pi / 2, np.pi / 2],
... "unit": "rad",
... },
... "frequency_range": np.linspace(0, 1500, 201),
... "plot_deflected_shape": {"speed": [615]},
... },
... plot_ucs={"stiffness_range": (5, 8)},
... stability_level1={
... "D": [0.35, 0.35],
... "H": [0.08, 0.08],
... "rated_power": [10000, 10000],
... "rho_ratio": [1.11, 1.14],
... "rho_suction": 30.45,
... "rho_discharge": 37.65,
... "length_unit": "m",
... "power_unit": "hp",
... "density_unit": "kg/m**3",
... },
... )
>>> report = rp.Report(rotor=rotor, config=config)
>>> report.rotor_type
'between_bearings'
"""
def __init__(self, rotor, config):
self.rotor = rotor
aux_df_disk = copy(rotor.df_disks)
aux_df_disk.drop(
aux_df_disk[(aux_df_disk.Ip <= 0) & (aux_df_disk.Id <= 0)].index,
inplace=True,
)
aux_df_disk = aux_df_disk.reset_index(drop=True)
self.aux_df_disk = aux_df_disk
# check if rotor is between bearings, single or double overhung
# fmt: off
if(
all(i > min(rotor.df_bearings["n"]) for i in aux_df_disk["n"]) and
all(i < max(rotor.df_bearings["n"]) for i in aux_df_disk["n"])
):
rotor_type = "between_bearings"
disk_nodes = [
i for i in aux_df_disk["n"] if(
i > min(rotor.df_bearings["n"]) and
i < max(rotor.df_bearings["n"])
)
]
elif(
any(i < min(rotor.df_bearings["n"]) for i in aux_df_disk["n"]) and
all(i < max(rotor.df_bearings["n"]) for i in aux_df_disk["n"])
):
rotor_type = "single_overhung_l"
disk_nodes = [
i for i in aux_df_disk["n"] if i < min(rotor.df_bearings["n"])
]
elif(
all(i > min(rotor.df_bearings["n"]) for i in aux_df_disk["n"]) and
any(i > max(rotor.df_bearings["n"]) for i in aux_df_disk["n"])
):
rotor_type = "single_overhung_r"
disk_nodes = [
i for i in aux_df_disk["n"] if i > max(rotor.df_bearings["n"])
]
elif(
any(i < min(rotor.df_bearings["n"]) for i in aux_df_disk["n"]) and
any(i > max(rotor.df_bearings["n"]) for i in aux_df_disk["n"])
):
rotor_type = "double_overhung"
disk_nodes = [
i for i in aux_df_disk["n"] if(
i < min(rotor.df_bearings["n"]) or
i > max(rotor.df_bearings["n"])
)
]
# fmt: on
self.rotor_type = rotor_type
self.disk_nodes = disk_nodes
machine_options = ["compressor", "turbine", "axial_flow"]
machine_type = config.rotor_properties.rotor_id.type
if machine_type not in machine_options:
raise ValueError(
"rotor_id.type is set to {}. Please choose between {}.".format(
machine_type, machine_options
)
)
if config.rotor_properties.rotor_id.tag is None:
config.update_config(rotor_properties=dict(rotor_id=dict(tag=rotor.tag)))
self.tag = config.rotor_properties.rotor_id.tag
self.config = config
@staticmethod
def _rotor_instance(rotor, bearing_list):
"""Build an instance of an auxiliary rotor with different bearing clearances.
Parameters
----------
rotor : object
A rotor built from rotor_assembly.
bearing_list : list
List with the bearing elements.
Returns
-------
aux_rotor : Rotor.object
Returns a rotor object copy with different bearing clearance.
Example
-------
>>> import ross as rs
>>> import report as rp
>>> stfx = [0.4e7, 0.5e7, 0.6e7, 0.7e7]
>>> damp = [2.8e3, 2.7e3, 2.6e3, 2.5e3]
>>> freq = [400, 800, 1200, 1600]
>>> bearing0 = rs.BearingElement(0, kxx=stfx, cxx=damp, frequency=freq)
>>> bearing1 = rs.BearingElement(6, kxx=stfx, cxx=damp, frequency=freq)
>>> bearings = [bearing0, bearing1]
>>> rotor = rs.rotor_example()
>>> report = rp.report_example()
>>> aux_rotor = report._rotor_instance(rotor, bearings)
"""
sh_elm = rotor.shaft_elements
dk_elm = rotor.disk_elements
pm_elm = rotor.point_mass_elements
tag = rotor.tag
aux_rotor = Rotor(sh_elm, dk_elm, bearing_list, pm_elm, tag=tag)
return aux_rotor
def run_report(self):
"""Run rotordynamics report.
This method runs the rotordynamics analyses and prepare the results to
generate the PDF report.
Returns
-------
fig_ucs : list
List with undamped critical speed map figures.
fig_mode_shape : list
List with mode shape figures.
fig_unbalance : list
List with unbalance response figures.
df_unbalance : dataframe
Dataframe with the unbalance response information.
fig_a_lvl1 : list
List with "Applied Cross-Coupled Stiffness" (stability level 1) figures.
fig_b_lvl1 : list
List with "CSR vs. Mean Gas Density" (stability level 1) figures.
df_lvl2 : dataframe
Dataframe with the stability level 2 information.
summaries : pd.Dataframe
Dataframes with a summary of stability level 1 and 2 analyses.
Example
-------
>>> import report as rp
>>> report = rp.analysis.report_example()
>>> # to run the report analysis, use:
>>> # results = report.run_report()
"""
rotor = copy(self.rotor)
# static analysis
fig_static = self._plot_static_analysis()
results_dict = {
"static_analysis": fig_static,
"oper_clearance": {},
"min_clearance": {},
"max_clearance": {},
}
# loop through bearings clearance
for k, bearings in self.config.bearings.__dict__.items():
if bearings is None:
warnings.warn(
f"Option '{k}' is empty. No analyses are performed for {k} bearings."
)
elif not isinstance(bearings, Iterable):
raise ValueError(
"{} option must be a list of bearing elements".format(k)
)
else:
self.rotor = self._rotor_instance(rotor, bearings)
# undamped critical speed map
results_dict[k]["ucs_map"] = self._plot_ucs()
# campbell diagram
results_dict[k]["dcs_map"] = self._plot_campbell_diagram()
fig_mode_shape = []
fig_defl_shape = []
fig_unbalance = []
df_unbalance = []
for i, mode in enumerate([1, 3]):
# mode shape figures
fig_mode_shape.append(self._plot_mode_shape(mode))
# unbalance response figures and dataframe
fig, shapes, _dict = self._unbalance_response(mode)
fig_unbalance.append(fig)
fig_defl_shape.append(shapes)
df = pd.DataFrame(_dict).astype(object)
df_unbalance.append(df)
results_dict[k]["mode_shape"] = fig_mode_shape
results_dict[k]["unbalance_response"] = fig_unbalance
results_dict[k]["deflected_shape"] = fig_defl_shape
results_dict[k]["unbalace_summary"] = df_unbalance
# stability level 1 figures
results_dict[k]["stability_level1"] = self._stability_level_1()
# stability level 2 dataframe
if self.condition:
df_lvl2 = self._stability_level_2()
results_dict[k]["stability_level2"] = df_lvl2
else:
results_dict[k]["stability_level2"] = None
# Summary tables
results_dict[k]["summary"] = self._summary()
self.rotor = rotor
return results_dict
def _plot_campbell_diagram(self):
"""Plot Campbell Diagram.
This function will calculate the damped natural frequencies for a speed range.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> fig = report._plot_campbell_diagram()
"""
units = self.config.run_campbell.frequency_units
fig = self.rotor.run_campbell(
speed_range=self.config.run_campbell.speed_range,
frequencies=self.config.run_campbell.num_modes,
).plot(harmonics=self.config.run_campbell.harmonics, frequency_units=units)
return fig
def _plot_static_analysis(self):
"""Run static analysis.
Static analysis calculates free-body diagram, deformed shaft, shearing
force diagram and bending moment diagram.
Returns
-------
figs : list
list of Plotly graph_objects.Figure() from Rotor.run_static()
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> fig = report._plot_static_analysis()
"""
static = self.rotor.run_static()
figs = [
static.plot_free_body_diagram(),
static.plot_deformation(),
static.plot_shearing_force(),
static.plot_bending_moment(),
]
return figs
def _plot_ucs(self):
"""Plot undamped critical speed map.
This method will plot the undamped critical speed map for a given range
of stiffness values. If the range is not provided, the bearing
stiffness at rated speed will be used to create a range.
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> fig = report._plot_ucs()
"""
max_speed = self.config.rotor_properties.rotor_speeds.max_speed
min_speed = self.config.rotor_properties.rotor_speeds.min_speed
oper_speed = self.config.rotor_properties.rotor_speeds.oper_speed
trip_speed = self.config.rotor_properties.rotor_speeds.trip_speed
units = self.config.rotor_properties.rotor_speeds.unit
frequency_units = self.config.plot_ucs.frequency_units
fig = self.rotor.plot_ucs(
stiffness_range=self.config.plot_ucs.stiffness_range,
num_modes=self.config.plot_ucs.num_modes,
num=self.config.plot_ucs.num,
synchronous=self.config.plot_ucs.synchronous,
stiffness_units=self.config.plot_ucs.stiffness_units,
frequency_units=frequency_units,
)
_speeds = [min_speed, max_speed, oper_speed, trip_speed]
speeds = [Q_(speed, units).to(frequency_units).m for speed in _speeds]
labels = ["min. speed", "max. speed", "rated speed", "trip speed"]
for speed, label in zip(speeds, labels):
fig.add_trace(
go.Scatter(
x=[min(fig.data[0].x), max(fig.data[0].x)],
y=[speed, speed],
mode="lines",
line=dict(color="black", dash="dot", width=2),
name=label,
hoverinfo="none",
showlegend=False,
yaxis="y2",
)
)
fig.update_layout(
yaxis2=dict(
ticktext=labels,
tickvals=speeds,
type="log",
matches="y",
anchor="x",
overlaying="y",
side="right",
),
)
return fig
def _static_forces(self):
"""Calculate the bearing reaction forces.
Returns
-------
Fb : list
Bearing reaction forces.
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> report._static_forces()
array([44.09320349, 44.09320349])
"""
# get reaction forces on bearings
self.rotor.run_static()
Fb = list(self.rotor.bearing_forces_nodal.values())
Fb = np.array(Fb) / 9.8065
return Fb
def _unbalance_forces(self, mode):
"""Calculate the unbalance forces.
The unbalance forces are calculated based on the rotor type:
between_bearings :
The unbalance forces derive from the bearing reaction forces.
single_overhung_l :
The unbalance forces derive from the disks' masses on the
shaft left end.
single_overhung_r :
The unbalance forces derive from the disks' masses on the
shaft right end.
double_overhung :
The unbalance forces derive from the disks' masses on the
shaft left and right ends.
Parameters
----------
mode : int
n'th mode shape.
Returns
-------
force : list
Unbalancing forces.
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> report._unbalance_forces(mode=0)
array([0.04479869])
"""
N = self.config.rotor_properties.rotor_speeds.max_speed
unit = self.config.rotor_properties.rotor_speeds.unit
N = Q_(N, unit).to("rpm").m
if mode > 3:
raise ValueError(
"This module calculates only the response for the first "
"two backward and forward modes. "
)
# get reaction forces on bearings
if self.rotor_type == "between_bearings":
Fb = self._static_forces()
if mode == 0 or mode == 1:
if N < 25000:
force = [max(6350e-6 * np.sum(Fb) / N, 254e-6 * np.sum(Fb))]
else:
force = [max(6350e-6 * np.sum(Fb) / 25000, 254e-6 * np.sum(Fb))]
if mode == 2 or mode == 3:
force = [max(6350e-6 * f / N, 254e-6 * f) for f in Fb]
# get disk masses
elif self.rotor_type == "single_overhung_l":
Wd = [
disk.m
for disk in self.rotor.disk_elements
if disk.n < min(self.rotor.df_bearings["n"])
]
Ws = [
sh.m
for sh in self.rotor.shaft_elements
if sh.n_l < min(self.rotor.df_bearings["n"])
]
W3 = np.sum(Wd + Ws)
if N < 25000:
force = [6350e-6 * W3 / N]
else:
force = [6350e-6 * W3 / 25000]
elif self.rotor_type == "single_overhung_r":
Wd = [
disk.m
for disk in self.rotor.disk_elements
if disk.n > max(self.rotor.df_bearings["n"])
]
Ws = [
sh.m
for sh in self.rotor.shaft_elements
if sh.n_r > max(self.rotor.df_bearings["n"])
]
W3 = np.sum(Wd + Ws)
if N < 25000:
force = [6350e-6 * W3 / N]
else:
force = [6350e-6 * W3 / 25000]
elif self.rotor_type == "double_overhung":
Wd_l = [
disk.m
for disk in self.rotor.disk_elements
if disk.n < min(self.rotor.df_bearings["n"])
]
Ws_l = [
sh.m
for sh in self.rotor.shaft_elements
if sh.n_l < min(self.rotor.df_bearings["n"])
]
Wd_r = [
disk.m
for disk in self.rotor.disk_elements
if disk.n > max(self.rotor.df_bearings["n"])
]
Ws_r = [
sh.m
for sh in self.rotor.shaft_elements
if sh.n_r > max(self.rotor.df_bearings["n"])
]
W3 = np.array([np.sum(Wd_l + Ws_l), np.sum(Wd_r + Ws_r)])
if N < 25000:
force = 6350e-6 * W3 / N
else:
force = 6350e-6 * W3 / 25000
force = 2 * np.array(force)
return force
def _unbalance_response(self, mode):
"""Evaluate the unbalance response for the rotor.
This analysis takes the critical speeds of interest, calculates the
position and weight of the required unbalance and performs the analysis
including:
- Check if vibration at MCS is below the limit with the applied weight;
- Check if the clearances are ok if the vibration deteriorate to the
limit level;
Parameters
----------
mode : int
n'th mode shape.
Returns
-------
plot : Plotly graph_objects.make_subplots()
Plotly figure with Amplitude vs Frequency and Phase vs Frequency plots.
plot_shapes : list of Figures
List with Plotly figures of deflected shape graphs
unbalance_dict : dict
A dictionary with information about simulation parameters to be
displayed in the report. The dictionary contains:
- Mode number;
- Critical frequencies;
- Amplification factors;
- Separation margins (actual and required);
- Unbalance stations;
- Unbalance weights;
- Unbalance phases;
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> fig, plot_shapes, unbalance_dict = report._unbalance_response(mode=0)
"""
maxspeed = self.config.rotor_properties.rotor_speeds.max_speed
minspeed = self.config.rotor_properties.rotor_speeds.min_speed
speed_factor = self.config.rotor_properties.rotor_speeds.speed_factor
speed_unit = self.config.rotor_properties.rotor_speeds.unit
plot_speeds = self.config.run_unbalance_response.plot_deflected_shape.speed
freq_range = self.config.run_unbalance_response.frequency_range
modes = self.config.run_unbalance_response.modes
cluster_points = self.config.run_unbalance_response.cluster_points
num_modes = self.config.run_unbalance_response.num_modes
num_points = self.config.run_unbalance_response.num_points
rtol = self.config.run_unbalance_response.rtol
frequency_units = self.config.run_unbalance_response.frequency_units
amplitude_units = self.config.run_unbalance_response.amplitude_units
phase_units = self.config.run_unbalance_response.phase_units
rotor_length_units = self.config.run_unbalance_response.rotor_length_units
if freq_range is None and not cluster_points:
freq_range = np.linspace(
0, speed_factor * Q_(maxspeed, speed_unit).to("rad/s").m, 201
)
maxspeed = Q_(maxspeed, speed_unit).to("rad/s").m
minspeed = Q_(minspeed, speed_unit).to("rad/s").m
# returns the nodes where the unbalance forces will be applied
node_max, node_min = self._mode_shape(mode)
nodes = [int(node) for sub_nodes in [node_min, node_max] for node in sub_nodes]
_magnitude = self._unbalance_forces(mode)
phase = []
phase_angle = 0
for node in nodes:
phase.append(phase_angle)
phase_angle += np.pi
# fmt: off
response = self.rotor.run_unbalance_response(
nodes, _magnitude, phase, freq_range, modes,
cluster_points, num_modes, num_points, rtol,
)
# fmt: on
probe_nodes = self.config.run_unbalance_response.probes.node
probe_orientations = self.config.run_unbalance_response.probes.orientation
probe_unit = self.config.run_unbalance_response.probes.unit
probes = [(n, ort) for n, ort in zip(probe_nodes, probe_orientations)]
unbalance_dict = {"probe {}".format(i + 1): None for i in range(len(probes))}
k = 0
# fmt: off
plot = response.plot(
probes, probe_unit, frequency_units, amplitude_units, phase_units,
subplot_kwargs=dict(width=800, height=600),
)
# fmt: on
fig = response.plot_magnitude(
probes, probe_unit, frequency_units, amplitude_units
)
for j, data in enumerate(fig.data):
_dict = {
"Probe node": [probe_nodes[j]],
"Probe orientation": [np.round(probe_orientations[j], 2)],
"Critical frequencies": [],
"Amplification factor": [],
"Scale factor": [],
"Separation margin - ACTUAL": [],
"Separation margin - REQUIRED": [],
"Unbalance station(s)": nodes,
"Unbalance weight(s)": [
float(np.format_float_scientific(i, 2)) for i in _magnitude
],
"Unbalance phase(s)": np.round(phase, 2),
}
idx_max = argrelextrema(data.y, np.greater)[0].tolist()
wn = freq_range[idx_max]
for i, peak in enumerate(data.y[idx_max]):
peak_n = 0.707 * peak
peak_aux = np.linspace(peak_n, peak_n, len(freq_range))
idx = np.argwhere(np.diff(np.sign(peak_aux - data.y))).flatten()
idx = np.sort(np.append(idx, idx_max[i]))
# if speed range is not long enough to catch the magnitudes
try:
# catch the indexes right before and after the peak
idx_aux = [
list(idx).index(idx_max[i]) - 1,
list(idx).index(idx_max[i]) + 1,
]
idx = idx[idx_aux]
except IndexError:
idx = [list(idx).index(idx_max[i]) - 1, len(freq_range) - 1]
# Amplification Factor (AF) - API684 - SP6.8.2.1
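# AF = Nc / (N2 - N1), where Nc is the peak (critical) frequency and N1, N2
# are the frequencies at the 0.707 * peak (half-power) amplitude points.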
AF = wn[i] / (freq_range[idx[1]] - freq_range[idx[0]])
# Separation Margin (SM) - API684 - SP6.8.2.10
if AF > 2.5 and wn[i] < minspeed:
SM = np.round(min([16, 17 * (1 - 1 / (AF - 1.5))]) / 100, 2)
SM_ref = np.round((minspeed - wn[i]) / wn[i], 2)
elif AF > 2.5 and wn[i] > maxspeed:
SM = np.round(min([26, 10 + 17 * (1 - 1 / (AF - 1.5))]) / 100, 2)
SM_ref = np.round((wn[i] - maxspeed) / maxspeed, 2)
else:
SM = "None"
SM_ref = "None"
# amplitude limit (A1) - API684 - SP6.8.2.11
A1 = 25.4 * np.sqrt(12000 / Q_(maxspeed, "rad/s").to("rpm").m) * 1e-6
A1 = Q_(A1, "m").to(amplitude_units)
Amax = max(data.y)
# Scale Factor (Scc) - API684 - SP6.8.2.11 / API617 - 4.8.2.11
_Scc = np.round(max(A1.m / Amax, 0.5), 2)
Scc = min(_Scc, 6.0)
_dict["Amplification factor"].append(np.round(AF, 2))
_dict["Scale factor"].append(Scc)
_dict["Separation margin - ACTUAL"].append(SM)
_dict["Separation margin - REQUIRED"].append(SM_ref)
_dict["Critical frequencies"].append(wn[i])
unbalance_dict["probe {}".format(k + 1)] = _dict
k += 1
maxspeed = Q_(maxspeed, "rad/s").to(frequency_units).m
minspeed = Q_(minspeed, "rad/s").to(frequency_units).m
customdata = [minspeed, maxspeed]
max_amplitude = np.amax(np.array([data.y for data in fig.data]))
plot.add_trace(
go.Scatter(
x=[minspeed, maxspeed, maxspeed, minspeed, minspeed],
y=[0, 0, max_amplitude, max_amplitude, 0],
customdata=customdata * 5,
mode="lines",
opacity=0.3,
fill="toself",
fillcolor=tableau_colors["green"],
line=dict(width=1.5, color=tableau_colors["green"]),
name="Operation Speed Range",
legendgroup="Operation Speed Range",
hoveron="points+fills",
showlegend=True,
hoverlabel=dict(bgcolor=tableau_colors["green"]),
hovertemplate=(
f"<b>min. speed: {customdata[0]:.1f}</b><br>"
+ f"<b>max. speed: {customdata[1]:.1f}</b>"
),
),
row=1,
col=1,
)
plot_shapes = [
response.plot_deflected_shape(
speed=speed,
frequency_units=frequency_units,
displacement_units=amplitude_units,
rotor_length_units=rotor_length_units,
)
for speed in plot_speeds
]
return plot, plot_shapes, unbalance_dict
def _mode_shape(self, mode):
"""Evaluate the mode shapes for the rotor.
This analysis presents the vibration mode for each critical speed.
The goal is to locate the critical node, where the displacement is the
greatest, and then apply the loads for the unbalance response analysis
(stability level 1).
Parameters
----------
mode : int
the n'th vibration mode
Returns
-------
node_min : int
Nodes where the minimum displacements occur
node_max : int
Nodes where the maximum displacements occur
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> node_min, node_max = report._mode_shape(mode=0)
>>> node_min
array([], dtype=float64)
>>> node_max
array([3.])
"""
aux_df_disk = self.aux_df_disk
speed = self.config.rotor_properties.rotor_speeds.oper_speed
speed_unit = self.config.rotor_properties.rotor_speeds.unit
speed = Q_(speed, speed_unit).to("rad/s").m
modal = self.rotor.run_modal(speed=speed)
xn, yn, zn, xc, yc, zc_pos, nn = modal.calc_mode_shape(mode=mode)
# reduce 3D view to 2D view
theta = np.arctan(xn[0] / yn[0])
vn = xn * np.sin(theta) + yn * np.cos(theta)
# remove repetitive values from zn and vn
idx_remove = []
for i in range(1, len(zn)):
if zn[i] == zn[i - 1]:
idx_remove.append(i)
zn = np.delete(zn, idx_remove)
vn = np.delete(vn, idx_remove)
node_min = np.array([])
node_max = np.array([])
if self.rotor_type == "between_bearings":
aux_idx_max = argrelextrema(vn, np.greater, order=nn)[0].tolist()
aux_idx_min = argrelextrema(vn, np.less, order=nn)[0].tolist()
# verification of rigid modes
if len(aux_idx_max) == 0 and len(aux_idx_min) == 0:
idx_max = np.argmax(vn)
idx_min = np.argmin(vn)
# corrects the index by the removed points
for i in idx_remove:
if idx_min > i:
idx_min += 1
if idx_max > i:
idx_max += 1
node_max = np.round(np.array([idx_max]) / nn)
node_min = np.round(np.array([idx_min]) / nn)
if mode in [2, 3] and len(aux_idx_max) == 0:
aux_idx_max = [np.argmax(vn)]
if mode in [2, 3] and len(aux_idx_min) == 0:
aux_idx_min = [np.argmin(vn)]
if len(aux_idx_min) != 0:
idx_min = np.where(vn == min(vn[aux_idx_min]))[0].tolist()
# corrects the index by the removed points
for i in idx_remove:
if idx_min[0] > i:
idx_min[0] += 1
node_min = np.round(np.array(idx_min) / nn)
if len(aux_idx_max) != 0:
idx_max = np.where(vn == max(vn[aux_idx_max]))[0].tolist()
# corrects the index by the removed points
for i in idx_remove:
if idx_max[0] > i:
idx_max[0] += 1
node_max = np.round(np.array(idx_max) / nn)
elif self.rotor_type == "double_overhung":
node_max = [max(aux_df_disk["n"])]
node_min = [min(aux_df_disk["n"])]
elif self.rotor_type == "single_overhung_l":
node_min = [min(aux_df_disk["n"])]
elif self.rotor_type == "single_overhung_r":
node_max = [max(aux_df_disk["n"])]
return node_min, node_max
def _plot_mode_shape(self, mode):
"""Plot the mode shapes for the rotor.
Parameters
----------
mode : int
the n'th vibration mode
Returns
-------
fig : Plotly graph_objects.Figure()
The figure object with the plot.
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> fig = report._plot_mode_shape(mode=0)
"""
nodes_pos = np.array(self.rotor.nodes_pos)
df_bearings = self.rotor.df_bearings
speed = self.config.rotor_properties.rotor_speeds.oper_speed
speed_unit = self.config.rotor_properties.rotor_speeds.unit
speed = Q_(speed, speed_unit).to("rad/s").m
modal = self.rotor.run_modal(speed=speed)
fig = modal.plot_mode_2d(
mode=mode, frequency_units=self.config.mode_shape.frequency_units
)
fig.add_trace(
go.Scatter(
x=nodes_pos[df_bearings["n"]],
y=np.zeros(len(df_bearings)),
mode="markers",
marker=dict(size=12, color=colors1[5]),
name="bearing_node",
showlegend=False,
hovertemplate="Bearing Position: %{x:.2f}",
)
)
pos0 = nodes_pos[min(df_bearings["n"])]
pos1 = nodes_pos[max(df_bearings["n"])]
fig.add_annotation(
x=np.mean(nodes_pos[df_bearings["n"]]),
y=0,
axref="x",
ayref="y",
xshift=0,
yshift=20,
text="<b>Bearing Span = {:.2f}</b>".format(pos1 - pos0),
font=dict(size=18),
showarrow=False,
)
for node in nodes_pos[df_bearings["n"]]:
fig.add_trace(
go.Scatter(
x=[node, node],
y=[-2, 2],
mode="lines",
line=dict(width=2.5, color=colors1[5], dash="dash"),
name="Span",
legendgroup="Span",
hoverinfo="none",
showlegend=False,
)
)
fig.update_yaxes(range=[-2, 2])
return fig
def _stability_level_1(self):
"""Stability analysis level 1.
This analysis considers an anticipated cross-coupling QA based on
conditions at the normal operating point and the cross-coupling
required to produce a zero log decrement, Q0.
Components such as seals and impellers are not considered in this
analysis.
Attributes
----------
condition: bool
False: Stability Level 1 satisfies the analysis;
True: Stability Level 2 is required.
Return
------
fig1 : Plotly graph_objects.Figure()
Applied Cross-Coupled Stiffness vs. Log Decrement plot.
fig2 : Plotly graph_objects.Figure()
CSR vs. Mean Gas Density plot.
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> fig1, fig2 = report._stability_level_1()
>>> report.Qa
3013.61482785762
"""
length_unit = self.config.stability_level1.length_unit
power_unit = self.config.stability_level1.power_unit
density_unit = self.config.stability_level1.density_unit
speed_unit = self.config.rotor_properties.rotor_speeds.unit
D = Q_(self.config.stability_level1.D, length_unit).to("m").m
H = Q_(self.config.stability_level1.H, length_unit).to("m").m
HP = Q_(self.config.stability_level1.rated_power, power_unit).to("hp").m
RHOs = (
Q_(self.config.stability_level1.rho_suction, density_unit).to("kg/m**3").m
)
RHOd = (
Q_(self.config.stability_level1.rho_discharge, density_unit).to("kg/m**3").m
)
rho_ratio = self.config.stability_level1.rho_ratio
machine_type = self.config.rotor_properties.rotor_id.type
self.MCS = self.config.rotor_properties.rotor_speeds.max_speed
speed = self.config.rotor_properties.rotor_speeds.oper_speed
speed = Q_(speed, speed_unit).to("rpm").m
if len(D) != len(H):
raise Exception(
"config.stability_level1.H and config.stability_level1.D"
"must have the same length."
)
steps = 11
C = 9.55
Qa = 0.0
cross_coupled_array = np.array([])
# Qa - Anticipated cross-coupling for compressors - API 684 - SP6.8.5.6
if machine_type == "compressor":
Bc = 3.0
Dc, Hc = D, H
for i, disk in enumerate(self.rotor.disk_elements):
if disk.n in self.disk_nodes:
qi = HP[i] * Bc * C * rho_ratio[i] / (Dc[i] * Hc[i] * speed)
Qi = np.linspace(0, 10 * qi, steps)
cross_coupled_array = np.append(cross_coupled_array, Qi)
Qa += qi
# Qa - Anticipated cross-coupling for turbines - API 684 - SP6.8.5.6
if machine_type == "turbine" or machine_type == "axial_flow":
Bt = 1.5
Dt, Ht = D, H
for i, disk in enumerate(self.rotor.disk_elements):
if disk.n in self.disk_nodes:
qi = (HP[i] * Bt * C) / (Dt[i] * Ht[i] * speed)
Qi = np.linspace(0, 10 * qi, steps)
cross_coupled_array = np.append(cross_coupled_array, Qi)
Qa += qi
# Defining cross-coupling range to 10*Qa - API 684 - SP6.8.5.8
Qi = np.linspace(0, 10 * Qa, steps)
cross_coupled_array = np.append(cross_coupled_array, Qi)
cross_coupled_array = cross_coupled_array.reshape(
[len(self.disk_nodes) + 1, steps]
).T
log_dec = np.zeros(len(cross_coupled_array))
# remove disks and seals from the rotor model
bearing_list = [
copy(b)
for b in self.rotor.bearing_elements
if not isinstance(b, SealElement)
]
# Applying cross-coupling on rotor mid-span
if self.rotor_type == "between_bearings":
for i, Q in enumerate(cross_coupled_array[:, -1]):
bearings = [copy(b) for b in bearing_list]
# cross-coupling introduced at the rotor mid-span
n = np.round(np.mean(self.rotor.nodes))
cross_coupling = BearingElement(n=int(n), kxx=0, cxx=0, kxy=Q, kyx=-Q)
bearings.append(cross_coupling)
aux_rotor = Rotor(self.rotor.shaft_elements, [], bearings)
modal = aux_rotor.run_modal(speed=speed * np.pi / 30)
non_backward = modal.whirl_direction() != "Backward"
log_dec[i] = modal.log_dec[non_backward][0]
# Applying cross-coupling for each disk - API 684 - SP6.8.5.9
else:
for i, Q in enumerate(cross_coupled_array[:, :-1]):
bearings = [copy(b) for b in bearing_list]
# cross-coupling introduced at overhung disks
for n, q in zip(self.disk_nodes, Q):
cross_coupling = BearingElement(n=n, kxx=0, cxx=0, kxy=q, kyx=-q)
bearings.append(cross_coupling)
aux_rotor = Rotor(self.rotor.shaft_elements, [], bearings)
modal = aux_rotor.run_modal(speed=speed * np.pi / 30)
non_backward = modal.whirl_direction() != "Backward"
log_dec[i] = modal.log_dec[non_backward][0]
# verifies if log dec is greater than zero to begin extrapolation
cross_coupled_Qa = cross_coupled_array[:, -1]
if log_dec[-1] > 0:
g = interp1d(
cross_coupled_Qa, log_dec, fill_value="extrapolate", kind="linear"
)
stiff = cross_coupled_Qa[-1] * (1 + 1 / (len(cross_coupled_Qa)))
k = 0
while g(stiff) > 0:
log_dec = np.append(log_dec, g(stiff))
cross_coupled_Qa = np.append(cross_coupled_Qa, stiff)
stiff += cross_coupled_Qa[-1] / (len(cross_coupled_Qa))
k += 1
if k > 10000:
break
Q0 = cross_coupled_Qa[-1]
else:
idx = min(range(len(log_dec)), key=lambda i: abs(log_dec[i]))
Q0 = cross_coupled_Qa[idx]
# Find value for log_dec corresponding to Qa
log_dec_a = log_dec[np.where(np.isclose(cross_coupled_Qa, Qa))][0]
# CSR - Critical Speed Ratio
maxspeed = Q_(self.MCS, speed_unit).to("rad/s").m
try:
crit_speed = self.rotor.run_critical_speed().wn[0]
except Exception:
crit_speed = self.rotor.run_modal(speed=maxspeed).wn[0]
CSR = maxspeed / crit_speed
# RHO_mean - Average gas density
RHO_mean = (RHOd + RHOs) / 2
RHO = np.linspace(0, RHO_mean * 5, 501)
# CSR_boundary - function to define the CSR boundaries
CSR_boundary = np.piecewise(
RHO,
[RHO <= 16.53, RHO > 16.53, RHO == 60, RHO > 60],
[2.5, lambda RHO: (-0.0115 * RHO + 2.69), 2.0, 0.0],
)
# Plotting area
fig1 = go.Figure()
fig1.add_trace(
go.Scatter(
x=cross_coupled_Qa,
y=log_dec,
mode="lines",
showlegend=False,
hoverinfo="none",
)
)
fig1.add_trace(
go.Scatter(
x=[Qa],
y=[log_dec_a],
mode="markers",
name="<b>Qa: Anticipated cross-coupling</b>",
hoverinfo="none",
)
)
fig1.add_annotation(
x=Qa,
y=log_dec_a,
axref="x",
ayref="y",
xshift=15,
yshift=15,
text="<b>Qa</b>",
showarrow=False,
)
fig1.update_xaxes(
title_text="<b>Applied Cross-Coupled Stiffness, Q (N/m)</b>",
rangemode="nonnegative",
)
fig1.update_yaxes(title_text="<b>Log Dec</b>", rangemode="nonnegative")
fig1.update_layout(
title=dict(
text=(
"<b>Applied Cross-Coupled Stiffness vs. Log Decrement</b><br>"
+ "<b>(API 684 - SP 6.8.5.10)</b>"
)
)
)
fig2 = go.Figure()
fig2.add_annotation(
x=RHO_mean,
y=CSR,
axref="x",
ayref="y",
xshift=40,
yshift=0,
text="<b>{}</b>".format(self.tag),
showarrow=False,
)
for text, x, y in zip(["Region A", "Region B"], [30, 60], [1.20, 2.75]):
fig2.add_annotation(
x=x,
y=y,
axref="x",
ayref="y",
xshift=0,
yshift=0,
text=f"<b>{text}</b>",
opacity=0.4,
showarrow=False,
)
fig2.add_trace(
go.Scatter(
x=RHO,
y=CSR_boundary,
mode="lines",
showlegend=False,
hoverinfo="none",
xaxis="x",
)
)
fig2.add_trace(
go.Scatter(
x=0.062428 * RHO,
y=CSR_boundary,
mode="lines",
showlegend=False,
hoverinfo="none",
xaxis="x2",
)
)
fig2.add_trace(
go.Scatter(
x=[RHO_mean],
y=[CSR],
mode="markers",
name="<b>CSR: Critical Speed Ratio</b>",
hoverinfo="none",
xaxis="x",
)
)
fig2.update_xaxes(mirror=True)
fig2.update_yaxes(
title_text="<b>Maximum Critical Speed Ratio</b>",
rangemode="nonnegative",
domain=[0.1, 1],
)
fig2.update_layout(
xaxis=dict(
title_text="<b>kg/m³</b>",
rangemode="nonnegative",
overlaying="x2",
anchor="y",
),
xaxis2=dict(
title_text="<b>lb/ft³</b>",
rangemode="nonnegative",
anchor="free",
side="bottom",
position=0,
),
title=dict(
text=(
"<b>CSR vs. Mean Gas Density</b><br>"
+ "<b>(API 684 - SP 6.8.5.10)</b>"
)
),
)
# Level 1 screening criteria - API 684 - SP6.8.5.10
idx = min(range(len(RHO)), key=lambda i: abs(RHO[i] - RHO_mean))
if machine_type == "compressor":
if Q0 / Qa < 2.0:
condition = True
elif log_dec_a < 0.1:
condition = True
elif 2.0 < Q0 / Qa < 10.0 and CSR > CSR_boundary[idx]:
condition = True
else:
condition = False
if machine_type == "turbine" or machine_type == "axial_flow":
if log_dec_a < 0.1:
condition = True
else:
condition = False
# updating attributes
self.Q0 = Q0
self.Qa = Qa
self.log_dec_a = log_dec_a
self.CSR = CSR
self.Qratio = Q0 / Qa
self.crit_speed = crit_speed
self.rho_gas = RHO_mean
self.condition = condition
return fig1, fig2
def _stability_level_2(self):
"""Stability analysis level 2.
For the level 2 stability analysis additional sources that contribute
to the rotor stability shall be considered such as:
a) labyrinth seals;
b) damper seals;
c) impeller/blade flow aerodynamic effects;
d) internal friction.
Returns
-------
df_logdec: pd.DataFrame
A dataframe relating the logarithmic decrement for each case analyzed.
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> dataframe = report._stability_level_2()
"""
oper_speed = self.config.rotor_properties.rotor_speeds.oper_speed
unit = self.config.rotor_properties.rotor_speeds.unit
oper_speed = Q_(oper_speed, unit).to("rad/s").m
# Build a list of seals
seal_list = [
copy(b) for b in self.rotor.bearing_elements if isinstance(b, SealElement)
]
bearing_list = [
copy(b)
for b in self.rotor.bearing_elements
if not isinstance(b, SealElement)
]
log_dec_seal = []
log_dec_disk = []
log_dec_full = []
data_seal = {}
data_disk = {}
data_rotor = {}
# Evaluate log dec for each component - Disks
if len(self.rotor.disk_elements):
for disk in self.rotor.disk_elements:
aux_rotor = Rotor(
shaft_elements=self.rotor.shaft_elements,
disk_elements=[disk],
bearing_elements=bearing_list,
)
modal = aux_rotor.run_modal(speed=oper_speed)
non_backward = modal.whirl_direction() != "Backward"
log_dec_disk.append(modal.log_dec[non_backward][0])
# Evaluate log dec for group bearings + disks
disk_tags = [
"Shaft + Bearings + " + disk.tag for disk in self.rotor.disk_elements
]
# Evaluate log dec for group bearings + all disks
if len(self.rotor.disk_elements) > 1:
all_disks_tag = " + ".join(
[disk.tag for disk in self.rotor.disk_elements]
)
disk_tags.append("Shaft + Bearings + " + all_disks_tag)
aux_rotor = Rotor(
shaft_elements=self.rotor.shaft_elements,
disk_elements=self.rotor.disk_elements,
bearing_elements=bearing_list,
)
modal = aux_rotor.run_modal(speed=oper_speed)
non_backward = modal.whirl_direction() != "Backward"
log_dec_disk.append(modal.log_dec[non_backward][0])
data_disk = {"tags": disk_tags, "log_dec": log_dec_disk}
# Evaluate log dec for each component - Seals
if len(seal_list):
for seal in seal_list:
bearings_seal = deepcopy(bearing_list)
bearings_seal.append(seal)
aux_rotor = Rotor(
shaft_elements=self.rotor.shaft_elements,
disk_elements=[],
bearing_elements=bearings_seal,
)
modal = aux_rotor.run_modal(speed=oper_speed)
non_backward = modal.whirl_direction() != "Backward"
log_dec_seal.append(modal.log_dec[non_backward][0])
seal_tags = ["Shaft + Bearings + " + seal.tag for seal in seal_list]
if len(seal_list) > 1:
# Evaluate log dec for group bearings + seals
all_seals_tag = " + ".join([seal.tag for seal in seal_list])
seal_tags.append("Shaft + Bearings + " + all_seals_tag)
aux_rotor = Rotor(
shaft_elements=self.rotor.shaft_elements,
disk_elements=[],
bearing_elements=self.rotor.bearing_elements,
)
modal = aux_rotor.run_modal(speed=oper_speed)
non_backward = modal.whirl_direction() != "Backward"
log_dec_seal.append(modal.log_dec[non_backward][0])
data_seal = {"tags": seal_tags, "log_dec": log_dec_seal}
# Evaluate log dec for all components
modal = self.rotor.run_modal(speed=oper_speed)
non_backward = modal.whirl_direction() != "Backward"
log_dec_full.append(modal.log_dec[non_backward][0])
rotor_tags = [self.tag]
data_rotor = {"tags": rotor_tags, "log_dec": log_dec_full}
df_logdec_disk = pd.DataFrame(data_disk)
df_logdec_seal = pd.DataFrame(data_seal)
df_logdec_full = pd.DataFrame(data_rotor)
df_logdec = pd.concat([df_logdec_disk, df_logdec_seal, df_logdec_full])
df_logdec = df_logdec.reset_index(drop=True)
self.df_logdec_disk = df_logdec_disk
self.df_logdec_seal = df_logdec_seal
self.df_logdec_full = df_logdec_full
self.df_logdec = df_logdec
return df_logdec
def _summary(self):
"""Return datarfreames for Report summary.
This method will create dataframes with relevant info about the report.
Returns
-------
df_stab_lvl1 : pd.DataFrame
Dataframe with stability level 1 results
df_stab_lvl2 : pd.DataFrame
Dataframe with stability level 2 results
Example
-------
>>> import report as rp
>>> report = rp.report_example()
>>> stability1 = report._stability_level_1()
>>> stability2 = report._stability_level_2()
>>> df_lvl1, df_lvl2 = report._summary()
"""
machine_type = self.config.rotor_properties.rotor_id.type
stab_lvl1_data = dict(
tags=[self.tag],
machine_type=[machine_type],
Q0=[self.Q0],
Qa=[self.Qa],
log_dec_a=[self.log_dec_a],
Qratio=[self.Qratio],
crit_speed=[self.crit_speed],
MCS=[self.MCS],
CSR=[self.CSR],
RHO_gas=[self.rho_gas],
)
df_stab_lvl1 = | pd.DataFrame(stab_lvl1_data) | pandas.DataFrame |
# Import the required libraries: numpy, pandas and math
from graphlib.financialGraph import Data
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
TR = tr(data)
return pd.Series(
TR.rolling(center=False, window=period,
min_periods=1).mean(),
name=f'{period} ATR'
)
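# Usage sketch (assumes an OHLC DataFrame with 'high', 'low' and 'close'
# columns, and that tr() is defined elsewhere in this module):
#   data = pd.DataFrame({'high': [11, 12, 13], 'low': [9, 10, 11],
#                        'close': [10, 11, 12]})
#   data['ATR_14'] = atr(data, period=14)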
# [0] __ Adaptive Price Zone (APZ)
# TODO
def apz(data: DataFrame,period: int = 21,dev_factor: int = 2,
MA: Series = None,adjust: bool = True,) -> DataFrame:
if not isinstance(MA, pd.Series):
MA = dema(data, period)
price_range = pd.Series(
(data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
# ------------------> B <------------------------
# [0] __ Bollinger Bands (BBANDS)
# TODO
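# Middle band is the n-period SMA (or a user-supplied MA); the upper and
# lower bands are middle +/- std_multiplier * rolling standard deviation.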
def bbands(data: DataFrame,period: int = 20,MA: Series = None,
column: str = "close",std_multiplier: float = 2,) -> DataFrame:
std = data[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(sma(data, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
# [0] __ Bollinger Bands Width (BBWidth)
# TODO
def bbwidth(
data: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
BB = bbands(data, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
DEMA = (
2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean()
)
return pd.Series(
DEMA ,
name = f'{period}_DEMA'
)
# [0] __ Directional Movement Index (DMI)
# TODO
def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series:
def _get_time(close):
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = data.iloc[0:index]
else:
subset = data.iloc[(index - time) : index]
return rsi(subset, period=time, adjust=adjust).values[-1]
dates = Series(data.index)
periods = Series(range(14, len(dates)), index=dates.index[14:].values)
t = _get_time(data[column])
return periods.map(lambda x: _dmi(x))
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average(EMA)
# Exponential average of prev n day prices
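# With pandas ewm(span=n), the smoothing factor is alpha = 2 / (n + 1), i.e.
# EMA_t = alpha * price_t + (1 - alpha) * EMA_(t-1).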
def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(span=period, adjust=adjust).mean(),
name = f'{period}_EMA'
)
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# change in price / volatility Here change and volatility are absolute
def er(data : DataFrame,period: int = 10,column: str ='close') -> Series:
change = data[column].diff(period).abs()
volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum()
return pd.Series(change / volatility,
name=f'{period}_ER'
)
# [0] __ TODO (EVSTC)
# TODO
def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30,
k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series:
ema_slow = evwma(data, period_slow)
ema_fast = evwma(data, period_fast)
macd = ema_fast - ema_slow
STOK = pd.Series((
(macd - macd.rolling(window=k_period).min())
/ (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean()
return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period))
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
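# The recurrence implemented below is EVWMA_t = x_t * EVWMA_(t-1) + y_t,
# seeded with 0.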
def evwma(data, period: int = 20) -> Series:
vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum())
x = (vol_sum - data["volume"]) / vol_sum
y = (data["volume"] * data["close"]) / vol_sum
evwma = [0]
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=data.index,
name=f'{period}_EVWMA'
)
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA)
def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40,
signal: int = 9,adjust: bool = True,) -> DataFrame:
evwma_slow = evwma(data, period_slow)
evwma_fast = evwma(data, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="EV MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
# ------------------> F <------------------------
# [0] __ Fisher Transform
# TODO
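# The code below rescales the median price to [-1, 1] over the lookback
# window, smooths it with a 5-period EMA, then applies
# fisher = ln((1 + x) / (1 - x)) and smooths the result with a 3-period EMA.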
def fish(data: DataFrame, period: int = 10, adjust: bool = True) -> Series:
from numpy import log, seterr
seterr(divide="ignore")
med = (data["high"] + data["low"]) / 2
ndaylow = med.rolling(window=period).min()
ndayhigh = med.rolling(window=period).max()
raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1
smooth = raw.ewm(span=5, adjust=adjust).mean()
_smooth = smooth.fillna(0)
return pd.Series(
(log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(),
name="{0} period FISH.".format(period),
)
# [0] __ Fractal Adaptive Moving Average (FRAMA)
# TODO
def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series:
assert period % 2 == 0, "FRAMA period must be even"
c = data.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=data.index,
name= f'{period} FRAMA'
)
# [0] __ Finite Volume Element (FVE)
# TODO
def fve(data: DataFrame, period: int = 22, factor: int = 0.3) -> Series:
hl2 = (data["high"] + data["low"]) / 2
tp_ = tp(data)
smav = data["volume"].rolling(window=period).mean()
mf = pd.Series((data["close"] - hl2 + tp_.diff()), name="mf")
_mf = pd.concat([data["close"], data["volume"], mf], axis=1)
def vol_shift(row):
if row["mf"] > factor * row["close"] / 100:
return row["volume"]
elif row["mf"] < -factor * row["close"] / 100:
return -row["volume"]
else:
return 0
_mf["vol_shift"] = _mf.apply(vol_shift, axis=1)
_sum = _mf["vol_shift"].rolling(window=period).sum()
return pd.Series((_sum / smav) / period * 100)
# ------------------> H <------------------------
# [0] __ Hull Moving Average (HMA)
# wma of change in wma where change in wma is 2 * (wma half period) - (wma full period)
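# i.e. HMA(n) = WMA(2 * WMA(n/2) - WMA(n), sqrt(n)), which the code below
# computes via the temporary 'deltawma' column (wma() is assumed to be
# defined elsewhere in this module).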
def hma(data, period: int = 16) -> Series:
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = wma(data, period=half_length)
wmas = wma(data, period=period)
data["deltawma"] = 2 * wmaf - wmas
hma = wma(data, column="deltawma", period=sqrt_length)
return pd.Series(hma, name=f'{period}_HMA')
# ------------------> I <------------------------
# [0] __ Ichimoku Cloud
# TODO
def ichimoku(data: DataFrame,tenkan_period: int = 9,kijun_period: int = 26,
senkou_period: int = 52,chikou_period: int = 26,) -> DataFrame:
tenkan_sen = pd.Series(
(
data["high"].rolling(window=tenkan_period).max()
+ data["low"].rolling(window=tenkan_period).min()
)
/ 2,
name="TENKAN",
) ## conversion line
kijun_sen = pd.Series(
(
data["high"].rolling(window=kijun_period).max()
+ data["low"].rolling(window=kijun_period).min()
)
/ 2,
name="KIJUN",
) ## base line
senkou_span_a = pd.Series(
((tenkan_sen + kijun_sen) / 2), name="senkou_span_a"
) .shift(kijun_period) ## Leading span
senkou_span_b = pd.Series(
(
(
data["high"].rolling(window=senkou_period).max()
+ data["low"].rolling(window=senkou_period).min()
)
/ 2
),
name="SENKOU",
).shift(kijun_period)
chikou_span = pd.Series(
data["close"].shift(-chikou_period),
name="CHIKOU",
)
return pd.concat(
[tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1
)
# [0] __ Inverse Fisher Transform (IFTRSI)
# TODO
def ift_rsi(data: DataFrame,column: str = "close",rsi_period: int = 5,
wma_period: int = 9,) -> Series:
v1 = pd.Series(0.1 * (rsi(data, rsi_period) - 50), name="v1")
d = (wma_period * (wma_period + 1)) / 2
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
return pd.Series(
((v2 ** 2 - 1) / (v2 ** 2 + 1)),
name="IFT_RSI"
)
# ------------------> K <------------------------
# [0] __ Kaufman's Adaptive Moving Average (KAMA)
# first KAMA is SMA
# Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA)
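# The smoothing constant used below is
#   SC = (ER * (2/(ema_fast+1) - 2/(ema_slow+1)) + 2/(ema_slow+1)) ** 2,
# where ER is the Kaufman efficiency ratio er() defined above.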
def kama(data,er_: int = 10,ema_fast: int = 2,
ema_slow: int = 30,period: int = 20,
column: str ='close') -> Series:
er_ = er(data)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er_ * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
)
sma = pd.Series(
data[column].rolling(period).mean(), name="SMA"
)
kama = []
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), data[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name=f'{period}_KAMA')
return sma['KAMA']
# [0] __ Keltner Channels (KC)
# TODO
def kc(ohlc: DataFrame,period: int = 20,atr_period: int = 10,
MA: Series = None,kc_mult: float = 2,) -> DataFrame:
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(ema(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * atr(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * atr(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
# ------------------> M <------------------------
# [0] __ Moving average convergence divergence (MACD)
# MACD is Difference of ema fast and ema slow
# Here fast period is 12 and slow period is 26
# MACD Signal is ewm of MACD
def macd(data,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",adjust: bool = True
) -> DataFrame:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name=f'{period_fast}_EMA_fast')
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name=f'{period_slow}_EMA_slow')
MACD = pd.Series(EMA_fast - EMA_slow,name='MACD')
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(),name=f'{signal}_SIGNAL'
)
DIFF = pd.Series(
MACD - MACD_signal,
name="diff MACD_MSIGNAL"
)
return pd.concat(
[DIFF, MACD, MACD_signal ],
axis=1
)
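# Illustrative usage sketch (added for clarity, not part of the original
# module): with the default 12/26/9 settings a bullish crossover shows up as
# the "diff MACD_MSIGNAL" column turning positive.
def _macd_example():
    close = pd.Series(np.random.randn(300)).cumsum() + 100
    out = macd(pd.DataFrame({"close": close}))
    diff = out["diff MACD_MSIGNAL"]
    crossed_up = diff.gt(0) & diff.shift().le(0)
    return out.assign(cross_up=crossed_up).tail()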
# [0] __ Moving Standard Deviation (MSD)
# Standard deviation of a given period for the column passed as argument
def msd(data: DataFrame, period: int = 21, column: str = "close") -> Series:
return pd.Series(data[column].rolling(period).std(), name="MSD")
# Momentum Breakout Bands (MOBO)
# TODO
def mobo(data: DataFrame,period: int = 10,std_multiplier: float = 0.8,
column: str = "close",) -> DataFrame:
    BB = bbands(data, period=period, std_multiplier=std_multiplier, column=column)
return BB
# [0] __ Market momentum (MOM)
def mom(data: DataFrame, period: int = 10, column: str = "close") -> Series:
return pd.Series(data[column].diff(period),
name=f'{period}_MOM'
)
# [0] __ Moving Volume Weighted Average Price (MVWAP)
# SMA of (close * volume ) divided by SMA of volume
def mvwap(data: DataFrame, period:int = 9) -> Series:
data["cv"] =(data["close"] * data["volume"])
return pd.Series(
(sma(data,period = period,column = "cv")/sma(data,period=period,column="volume")),
name="MVWAP."
)
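# Illustrative usage sketch (added for clarity, not part of the original
# module): MVWAP is the rolling SMA of close*volume divided by the rolling
# SMA of volume, so with constant volume it reduces to a plain SMA of close.
# Assumes the sma() helper defined earlier in this module.
def _mvwap_example():
    demo = pd.DataFrame({"close": [10.0, 11.0, 12.0, 13.0],
                         "volume": [100, 100, 100, 100]})
    return mvwap(demo, period=2)     # equals the 2-period SMA of close here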
# ------------------> P <------------------------
# ------------|| Pivot ||------------------------
# [0] __ Pivot Camarilla
# TODO
def pivot_camarilla(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
    s1 = df_['close']-(1.1*(df_['high']-df_['low'])/12)
s2 = df_['close']-(1.1*(df_['high']-df_['low'])/6)
s3 = df_['close']-(1.1*(df_['high']-df_['low'])/4)
s4 =df_['close']-(1.1*(df_['high']-df_['low'])/2)
r1 = df_['close']+(1.1*(df_['high']-df_['low'])/12)
r2 = df_['close']+(1.1*(df_['high']-df_['low'])/6)
r3 =df_['close']+(1.1*(df_['high']-df_['low'])/4)
r4 = df_['close']+(1.1*(df_['high']-df_['low'])/2)
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Classic
# TODO
def pivot_classic(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = (pivot * 2) - df_["high"]
s2 = pivot - (df_["high"] - df_["low"])
s3 = pivot - 2*(df_["high"] - df_["low"])
s4 = pivot - 3*(df_["high"] - df_["low"])
r1 = (pivot * 2) - df_["low"]
r2 = pivot + (df_["high"] - df_["low"])
r3 = pivot + 2*(df_["high"] - df_["low"])
r4 = pivot + 3*(df_["high"] - df_["low"])
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
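# Illustrative usage sketch (added for clarity, not part of the original
# module): pivot levels are computed from the *previous* bar (note the
# data.shift() above), so the first output row is all NaN. Assumes the tp()
# typical-price helper defined earlier in this module.
def _pivot_classic_example():
    demo = pd.DataFrame({"open": [10.0, 11.0, 12.0],
                         "high": [11.0, 12.0, 13.0],
                         "low": [9.0, 10.0, 11.0],
                         "close": [10.5, 11.5, 12.5]})
    return pivot_classic(demo)       # columns: pivot, s1..s4, r1..r4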
# [0] __ Pivot Demark
# TODO
def pivot_demark(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot,s1,r1=[],[],[]
for i in range(len(df_)):
if df_['open'][i]==df_['close'][i]:
x=df_['high'][i]+df_['low'][i]+2*df_['close'][i]
elif df_['close'][i]>df_['open'][i]:
x=2*df_['high'][i]+df_['low'][i]+df_['close'][i]
else:
x=df_['high'][i]+2*df_['low'][i]+df_['close'][i]
pivot.append(x/4)
s1.append(x/2 - df_["high"][i])
r1.append(x/2 - df_["low"][i])
data_ = pd.DataFrame(pivot,columns=['pivot'])
data_['s1']=s1
data_['r1']=r1
return data_
# [0] __ Pivot Fibonacci
# TODO
def pivot_fibonacci(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = pivot - ((df_["high"] - df_["low"])*0.382)
s2 = pivot - ((df_["high"] - df_["low"])*0.618)
s3 = pivot - (df_["high"] - df_["low"])
    s4 = pivot - ((df_["high"] - df_["low"])*1.382)
r1 = pivot + ((df_["high"] - df_["low"])*0.382)
r2 = pivot + ((df_["high"] - df_["low"])*0.618)
r3 =pivot + (df_["high"] - df_["low"])
r4 = pivot + (df_["high"] - df_["low"])*1.382
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Traditional
# TODO
def pivot_traditional(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = (pivot * 2) - df_["high"]
s2 = pivot - (df_["high"] - df_["low"])
s3 = df_["low"] - (2 * (df_["high"] - pivot))
s4 = df_["low"] - (3 * (df_["high"] - pivot))
s5 = df_["low"] - (4 * (df_["high"] - pivot))
r1 = (pivot * 2) - df_["low"]
r2 = pivot + (df_["high"] - df_["low"])
r3 = df_["high"] + (2 * (pivot - df_["low"]))
r4 = df_["high"] + (3 * (pivot - df_["low"]))
r5 = df_["high"] + (4 * (pivot - df_["low"]))
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
            pd.Series(s2, name="s2"),
            pd.Series(s3, name="s3"),
            pd.Series(s4, name="s4"),
            pd.Series(s5, name="s5"),
            pd.Series(r1, name="r1"),
            pd.Series(r2, name="r2"),
            pd.Series(r3, name="r3"),
            pd.Series(r4, name="r4"),
            pd.Series(r5, name="r5"),
        ],
        axis=1,
    )
import pandas as pd
import numpy as np
#from pandas_datareader import data as web
import matplotlib.pyplot as plt
from datetime import datetime, date, time, timedelta
import os
import string
# import datetime
def fix_bittrex_dates(df_crypto):
#set an index by dates
df_crypto['Date'] = pd.to_datetime(df_crypto['Date'], format="%Y-%m-%d %I-%p", errors='coerce')
df_crypto.index = df_crypto['Date']
return df_crypto
def read_bittrex(crypto_path):
df_crypto = pd.read_csv(crypto_path, low_memory=False, header=1)
print(df_crypto.tail()) # Show data loading in
return fix_bittrex_dates(df_crypto)
def zip_dates(start_date_1, start_date_2, num_periods, time_freq, date_format):
    dates = zip(
        pd.date_range(start=start_date_1, periods=num_periods,
                      freq=time_freq).format(formatter=lambda x: x.strftime(date_format)),
        pd.date_range(start=start_date_2, periods=num_periods,
                      freq=time_freq).format(formatter=lambda x: x.strftime(date_format)),
    )
return dates
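# Illustrative usage sketch (added for clarity, not part of the original
# script): zip_dates pairs two equally long date ranges, e.g. window starts
# with window ends.
def _zip_dates_example():
    pairs = zip_dates('2021-01-01', '2021-01-08', num_periods=3,
                      time_freq='7D', date_format='%Y-%m-%d')
    # -> [('2021-01-01', '2021-01-08'), ('2021-01-08', '2021-01-15'),
    #     ('2021-01-15', '2021-01-22')]
    return list(pairs)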
def crypto_plot(thisdf, crypto_market, metric_time_delta):
# thisdf.index = thisdf['Date']
price = thisdf['Close']
ma = price.rolling(metric_time_delta).mean()
mstd = price.rolling(metric_time_delta).std()
plt.figure(2,figsize=(12,6))
fill_plt = plt.fill_between(mstd.index, ma-2*mstd, ma+2*mstd, color='b', alpha=0.2)
plt.plot(price.index, price, 'k', ma.index, ma, 'b')
plt.grid()
# plt.xlabel('Hours')
plt.ylabel(''.join((crypto_market.replace('.csv','')[8:14],)))
    plt.title(''.join((crypto_market.replace('.csv', ''), ' ',
                       str(pd.to_datetime(thisdf.Date.values[0])).replace(':', '-'),
                       ' to ',
                       str(pd.to_datetime(thisdf.Date.values[-1])))).replace('_', ' '))
    plt.savefig(''.join(('output/', crypto_market.replace('.csv', ''), ' ',
                         str(pd.to_datetime(thisdf.Date.values[0])).replace(':', '-'),
                         ' to ',
                         str(pd.to_datetime(thisdf.Date.values[-1])).replace(':', '-'),
                         '.png')))
import pandas as pd
import random
import sklearn.preprocessing as pp
from datetime import datetime
import itertools
import re
from scipy.stats import entropy
import uuid
import pickle
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import sqlite3
from collections import Counter
import numpy as np
from sklearn.linear_model import SGDClassifier
pd.options.display.max_columns = 50
pd.options.display.max_colwidth = 200
pd.set_option('display.max_rows', None)
RND_SEED = 45822
random.seed(RND_SEED)
np.random.seed(RND_SEED)
connection = sqlite3.connect('database.db', timeout=30)
with open("schema.sql") as f:
connection.executescript(f.read())
def get_db_connection():
conn = sqlite3.connect('database.db', timeout=30)
conn.row_factory = sqlite3.Row
return conn
def populate_texts_table_sql(texts_list, table_name="texts", reset_labels=True):
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM " + table_name + ";")
conn.commit()
if reset_labels:
for text_record in texts_list:
cur.execute("INSERT INTO " + table_name + " (id, text, label) VALUES (?, ?, ?)",
(text_record["id"], text_record["text"], "-"))
else:
for text_record in texts_list:
cur.execute("INSERT INTO " + table_name + " (id, text, label) VALUES (?, ?, ?)",
(text_record["id"], text_record["text"], text_record["label"]))
conn.commit()
conn.close()
return None
def get_decimal_value(name):
conn = get_db_connection()
query = "SELECT name, value FROM decimalValues WHERE name = '%s' ;" % name
sql_table = conn.execute(query).fetchall()
decimal_value = [dict(row)["value"] for row in sql_table][0]
conn.close()
return decimal_value
def set_decimal_value(name, value):
conn = get_db_connection()
query = "UPDATE decimalValues SET value = %s WHERE name = '%s' ;" % (value, name)
conn.execute(query)
conn.commit()
conn.close()
return None
def update_overall_quality_scores(value):
current_score = get_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL")
set_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL_PREVIOUS", value=current_score)
set_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL", value=value)
return None
def set_pkl(name, pkl_data, reset=False):
conn = get_db_connection()
cur = conn.cursor()
if not reset:
test_query = cur.execute('SELECT * FROM pkls WHERE name = ?', (name,)).fetchall()
if len(test_query) > 0:
cur.execute('DELETE FROM pkls WHERE name = ?', (name,))
query = "INSERT INTO pkls (name, data) VALUES (?, ?)"
pkl_data_ = pickle.dumps(pkl_data)
cur.execute(query, (name, pkl_data_))
# test_query = cur.execute('SELECT * FROM pkls WHERE name = ?', (name,)).fetchall()
# test_data = pickle.loads([dict(row)["data"] for row in test_query][0])
else:
cur.execute("DELETE FROM pkls WHERE name = '" + name + "';")
conn.commit()
conn.close()
return None
def get_pkl(name):
try:
conn = get_db_connection()
cur = conn.cursor()
query = "SELECT * FROM pkls WHERE name = '" + name + "';"
pkl_table = cur.execute(query).fetchall()
data = [dict(row)["data"] for row in pkl_table]
if len(data) > 0:
pkl_data = pickle.loads(data[0])
else:
pkl_data = None
conn.close()
return pkl_data
except:
return None
def get_text_list(table_name="texts"):
conn = get_db_connection()
text_list_sql = conn.execute("SELECT * FROM " + table_name).fetchall()
text_list_sql = [dict(row) for row in text_list_sql]
conn.close()
return text_list_sql
def set_text_list(label, table_name="searchResults"):
conn = get_db_connection()
conn.execute("UPDATE " + table_name + " SET label = '" + label + "'")
conn.commit()
conn.close()
return None
def clear_text_list(table_name="searchResults"):
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM " + table_name + ";")
conn.commit()
conn.close()
return None
def get_appropriate_text_list_list(text_list_full_sql, total_pages_sql, search_results_length_sql, table_limit_sql):
if search_results_length_sql > 0:
text_list_full_sql = get_text_list(table_name="searchResults")
total_pages_sql = get_variable_value(name="SEARCH_TOTAL_PAGES")
text_list_list_sql = create_text_list_list(text_list_full_sql=text_list_full_sql, sub_list_limit=table_limit_sql)
return text_list_list_sql, text_list_full_sql, total_pages_sql
def get_y_classes():
conn = get_db_connection()
y_classes_sql = conn.execute('SELECT className FROM yClasses;').fetchall()
y_classes_sql = [dict(row)["className"] for row in y_classes_sql]
conn.close()
return y_classes_sql
def clear_y_classes():
conn = get_db_connection()
conn.execute('DELETE FROM yClasses;')
conn.commit()
conn.close()
return []
def add_y_classes(y_classses_list, begin_fresh=True):
conn = get_db_connection()
cur = conn.cursor()
if begin_fresh:
cur.execute("DELETE FROM yClasses;")
for i, value in enumerate(y_classses_list):
cur.execute("INSERT INTO yClasses (classId, className) VALUES (?, ?)", (i, value))
conn.commit()
conn.close()
return 1
def get_click_log():
conn = get_db_connection()
sql_table = \
conn.execute('SELECT click_id, click_location, click_type, click_object, click_date_time FROM clickRecord;').fetchall()
click_log_sql = list()
for row in sql_table:
dict_row = {"click_id": dict(row)["click_id"], "click_location": dict(row)["click_location"],
"click_type": dict(row)["click_type"], "click_object": dict(row)["click_object"],
"click_date_time" : dict(row)["click_date_time"]}
click_log_sql.append(dict_row)
conn.close()
return click_log_sql
def get_value_log():
conn = get_db_connection()
sql_table = \
conn.execute('SELECT click_id, value_type, value FROM valueRecord;').fetchall()
click_log_sql = list()
for row in sql_table:
dict_row = {"click_id": dict(row)["click_id"], "value_type": dict(row)["value_type"],
"value": dict(row)["value"]}
click_log_sql.append(dict_row)
conn.close()
return click_log_sql
def reset_log_click_record_sql():
conn = get_db_connection()
conn.execute("DELETE FROM clickRecord")
conn.commit()
conn.close()
return None
def reset_log_click_value_sql():
conn = get_db_connection()
conn.execute("DELETE FROM valueRecord")
conn.commit()
conn.close()
return None
def add_log_click_record_sql(records):
conn = get_db_connection()
cur = conn.cursor()
for record in records:
cur.execute("""INSERT INTO clickRecord (click_id, click_location, click_type, click_object, click_date_time)
VALUES (?, ?, ?, ?, ?)""", (record["click_id"], record["click_location"], record["click_type"],
record["click_object"], record["click_date_time"]))
conn.commit()
conn.close()
return None
def add_log_click_value_sql(records):
conn = get_db_connection()
cur = conn.cursor()
for record in records:
cur.execute("""INSERT INTO valueRecord (click_id, value_type, value)
VALUES (?, ?, ?)""", (record["click_id"], record["value_type"], record["value"]))
conn.commit()
conn.close()
return None
def get_panel_flags():
conn = get_db_connection()
sql_table = conn.execute('SELECT name, value FROM initializeFlags;').fetchall()
panel_flags = {dict(row)["name"]: dict(row)["value"] for row in sql_table}
conn.close()
return panel_flags
def update_panel_flags_sql(update_flag):
conn = get_db_connection()
cur = conn.cursor()
update_query = "UPDATE initializeFlags SET value = ? WHERE name = ?;"
for name, value in update_flag.items():
cur.execute(update_query, (value, name))
conn.commit()
conn.close()
return None
def get_texts_group_x(table_name="group1Texts"):
conn = get_db_connection()
sql_table = conn.execute("SELECT id, text, label FROM " + table_name + ";").fetchall()
conn.close()
if len(sql_table) > 0:
texts_group_2 = [{"id": dict(row)["id"], "text": dict(row)["text"], "label": dict(row)["label"]} for row in sql_table]
else:
texts_group_2 = []
return texts_group_2
def set_texts_group_x(top_texts, table_name="group1Texts"):
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM " + table_name + ";")
if top_texts:
for record in top_texts:
cur.execute("INSERT INTO " + table_name + " (id, text, label) VALUES (?, ?, ?)",
(record["id"], record["text"], record["label"]))
conn.commit()
conn.close()
return None
def get_total_summary_sql():
conn = get_db_connection()
sql_table = conn.execute('SELECT name, number, percentage FROM totalSummary;').fetchall()
total_summary = [{"name": dict(row)["name"],
"number": dict(row)["number"],
"percentage": dict(row)["percentage"]} for row in sql_table]
conn.close()
return total_summary
def set_total_summary(text_lists):
labels = [text_obj["label"] for text_obj in text_lists]
label_counter = Counter(labels)
total_texts = len(text_lists)
number_unlabeled = label_counter["-"]
number_labeled = total_texts - number_unlabeled
total_texts_percentage = "100.00%"
if total_texts > 0:
number_unlabeled_percentage = "{:.2%}".format(number_unlabeled / total_texts)
number_labeled_percentage = "{:.2%}".format(number_labeled / total_texts)
else:
number_unlabeled_percentage = "{:.2%}".format(1.0)
number_labeled_percentage = "{:.2%}".format(0.0)
total_summary = list()
total_summary.append({"name": "Total Texts",
"number": "{:,}".format(total_texts),
"percentage": total_texts_percentage})
total_summary.append({"name": "Total Unlabeled",
"number": "{:,}".format(number_unlabeled),
"percentage": number_unlabeled_percentage})
total_summary.append({"name": "Total Labeled",
"number": "{:,}".format(number_labeled),
"percentage": number_labeled_percentage})
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM totalSummary;")
for record in total_summary:
cur.execute("INSERT INTO totalSummary (name, number, percentage) VALUES (?, ?, ?)",
(record["name"], record["number"], record["percentage"]))
conn.commit()
conn.close()
return None
def get_label_summary_sql():
conn = get_db_connection()
sql_table = conn.execute('SELECT name, number, percentage FROM labelSummary;').fetchall()
label_summary = [{"name": dict(row)["name"],
"number": dict(row)["number"],
"percentage": dict(row)["percentage"]} for row in sql_table]
conn.close()
return label_summary
def set_label_summary(text_lists):
labels = [text_obj["label"] for text_obj in text_lists]
label_counter = Counter(labels)
total_texts = len(text_lists)
label_summary = []
for key, value in label_counter.items():
label_summary.append({"name": key,
"number": "{:,}".format(value),
"percentage": "{:.2%}".format(value / total_texts)})
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM labelSummary;")
for record in label_summary:
cur.execute("INSERT INTO labelSummary (name, number, percentage) VALUES (?, ?, ?)",
(record["name"], record["number"], record["percentage"]))
conn.commit()
conn.close()
return None
def get_selected_text(selected_text_id, text_list_full_sql):
selected_text_test = [text["text"] for text in text_list_full_sql if text["id"] == selected_text_id]
if selected_text_id:
if len(selected_text_test) == 0:
selected_text = ""
else:
selected_text = selected_text_test[0]
else:
selected_text = ""
return selected_text
def create_text_list_list(text_list_full_sql, sub_list_limit):
texts_list_list = \
[text_list_full_sql[i:i + sub_list_limit] for i in range(0, len(text_list_full_sql), sub_list_limit)]
return texts_list_list
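# Illustrative sketch (added for clarity, not part of the original app):
# create_text_list_list simply chunks the full record list into table pages
# of sub_list_limit entries each.
def _pagination_example():
    texts = [{"id": str(i), "text": "text %d" % i, "label": "-"} for i in range(25)]
    pages = create_text_list_list(text_list_full_sql=texts, sub_list_limit=10)
    return [len(p) for p in pages]   # -> [10, 10, 5]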
def update_texts_list_by_id_sql(update_objs=None, selected_label=None, update_ids=None, sub_list_limit=10,
update_in_place=True):
conn = get_db_connection()
cur = conn.cursor()
if selected_label and update_ids and not update_objs:
if update_in_place:
update_query = "UPDATE texts SET label = ? WHERE id IN (%s)" % ",".join("?"*len(update_ids))
update_values = [selected_label]
update_values.extend(update_ids)
cur.execute(update_query, update_values)
conn.commit()
conn.close()
else:
cur.execute("DROP TABLE IF EXISTS temp_table;")
cur.execute("""
CREATE TABLE temp_table (
id TEXT NOT NULL,
text TEXT NOT NULL,
label TEXT NOT NULL
);
""")
query = "INSERT INTO temp_table SELECT * FROM texts WHERE id IN (%s)" % ",".join("?" * len(update_ids))
cur.execute(query, update_ids)
cur.execute("UPDATE temp_table SET label = ?", (selected_label, ))
cur.execute("DELETE FROM texts WHERE id IN (%s)" % ",".join("?" * len(update_ids)), update_ids)
cur.execute("INSERT INTO texts SELECT * FROM temp_table;")
conn.commit()
conn.close()
elif update_objs and not selected_label and not update_ids:
if update_in_place:
labels = set([obj["label"] for obj in update_objs])
for label in labels:
update_ids = [obj["id"] for obj in update_objs if obj["label"] == label]
update_ids_sql = ", ".join(update_ids)
update_query = "UPDATE texts SET label = ? WHERE id IN (%s)" % update_ids_sql
conn.execute(update_query, (label, ))
conn.commit()
conn.close()
else:
cur.execute("DROP TABLE IF EXISTS temp_table;")
cur.execute("""
CREATE TABLE temp_table (
id TEXT NOT NULL,
text TEXT NOT NULL,
label TEXT NOT NULL
);
""")
all_update_ids = [obj["id"] for obj in update_objs]
query = "INSERT INTO temp_table SELECT * FROM texts WHERE id IN (%s)" % ",".join("?" * len(all_update_ids))
cur.execute(query, all_update_ids)
labels = set([obj["label"] for obj in update_objs])
for label in labels:
update_ids = [obj["id"] for obj in update_objs if obj["label"] == label]
update_ids_sql = ", ".join(update_ids)
update_query = "UPDATE temp_table SET label = ? WHERE id IN (%s)" % update_ids_sql
conn.execute(update_query, (label,))
delete_query = "DELETE FROM texts WHERE id IN (%s)" % ",".join("?" * len(all_update_ids))
cur.execute(delete_query, all_update_ids)
cur.execute("INSERT INTO texts SELECT * FROM temp_table;")
conn.commit()
conn.close()
text_list_full = get_text_list(table_name="texts")
texts_list_list = create_text_list_list(text_list_full_sql=text_list_full, sub_list_limit=sub_list_limit)
return text_list_full, texts_list_list
def label_all_sql(fitted_classifier, sparse_vectorized_corpus, corpus_text_ids, texts_list,
label_only_unlabeled=True, sub_list_limit=50, update_in_place=True):
texts_list_df = pd.DataFrame(texts_list)
if not label_only_unlabeled:
predictions = fitted_classifier.predict(sparse_vectorized_corpus)
predictions_df = pd.DataFrame(predictions)
predictions_df["id"] = corpus_text_ids
labeled_text_ids = corpus_text_ids
number_to_label = len(labeled_text_ids)
else:
label_only_these_ids = texts_list_df[texts_list_df["label"] == "-"]["id"].values
keep_indices = [corpus_text_ids.index(x) for x in label_only_these_ids]
number_to_label = len(keep_indices)
if number_to_label > 0:
if label_only_unlabeled:
sparse_vectorized_corpus_alt = sparse_vectorized_corpus[keep_indices, :]
predictions = fitted_classifier.predict(sparse_vectorized_corpus_alt)
predictions_df = pd.DataFrame(predictions)
predictions_df["id"] = label_only_these_ids
labeled_text_ids = label_only_these_ids
predictions_df = predictions_df.rename(columns={0: "label"})
predictions_df = predictions_df.merge(texts_list_df[["id", "text"]], left_on="id", right_on="id",
how="left")
predictions_df = predictions_df[["id", "text", "label"]]
update_objects = predictions_df.to_dict("records")
text_list_full, texts_list_list = \
update_texts_list_by_id_sql(update_objs=update_objects,
selected_label=None,
update_ids=None,
sub_list_limit=sub_list_limit,
update_in_place=update_in_place)
else:
text_list_full = get_text_list(table_name="texts")
texts_list_list = create_text_list_list(text_list_full_sql=text_list_full, sub_list_limit=sub_list_limit)
labeled_text_ids = []
return text_list_full, texts_list_list, labeled_text_ids
def generate_summary_sql(text_lists):
labels = [text_obj["label"] for text_obj in text_lists]
label_counter = Counter(labels)
total_texts = len(text_lists)
number_unlabeled = label_counter["-"]
number_labeled = total_texts - number_unlabeled
set_total_summary(text_lists=text_lists)
set_label_summary(text_lists=text_lists)
set_variable(name="NUMBER_UNLABELED_TEXTS", value=number_unlabeled)
summary_headline = \
"Total Labeled : {:,} / {:,} {:.1%}".format(number_labeled, total_texts, number_labeled / total_texts)
set_variable(name="LABEL_SUMMARY_STRING", value=summary_headline)
total_summary_sql = get_total_summary_sql()
label_summary_sql = get_label_summary_sql()
number_unlabeled_texts_sql = number_unlabeled
label_summary_string_sql = summary_headline
return total_summary_sql, label_summary_sql, number_unlabeled_texts_sql, label_summary_string_sql
def set_variable(name, value):
conn = get_db_connection()
cur = conn.cursor()
test_query = cur.execute('SELECT * FROM variables WHERE name = ?', (name,)).fetchall()
if len(test_query) > 0:
cur.execute('DELETE FROM variables WHERE name = ?', (name,))
query = """INSERT INTO variables (name, value) VALUES (?, ?)
"""
else:
query = """INSERT INTO variables (name, value) VALUES (?, ?)
"""
cur.execute(query, (name, value))
conn.commit()
conn.close()
return 1
def get_variable_value(name):
conn = get_db_connection()
cur = conn.cursor()
query = cur.execute('SELECT value FROM variables WHERE name = ?', (name,)).fetchall()
value = [dict(row)["value"] for row in query]
value = value[0]
if name in ["TOTAL_PAGES", "NUMBER_UNLABELED_TEXTS", "MAX_CONTENT_PATH", "TEXTS_LIMIT", "TABLE_LIMIT",
"MAX_FEATURES", "RND_STATE", "PREDICTIONS_NUMBER", "SEARCH_RESULTS_LENGTH", "GROUP_1_KEEP_TOP",
"GROUP_3_KEEP_TOP", "CONFIRM_LABEL_ALL_TEXTS_COUNTS", "SEARCH_TOTAL_PAGES", "LABEL_ALL_BATCH_NO",
"LABEL_ALL_TOTAL_BATCHES", "NUMBER_AUTO_LABELED", "LABEL_ALL_BATCH_SIZE"]:
value = int(value)
if name in ["KEEP_ORIGINAL", "GROUP_1_EXCLUDE_ALREADY_LABELED", "GROUP_2_EXCLUDE_ALREADY_LABELED",
"PREDICTIONS_VERBOSE", "SIMILAR_TEXT_VERBOSE", "FIT_CLASSIFIER_VERBOSE", "FIRST_LABELING_FLAG",
"FULL_FIT_IF_LABELS_GOT_OVERRIDDEN", "FORCE_FULL_FIT_FOR_DIFFICULT_TEXTS",
"LABELS_GOT_OVERRIDDEN_FLAG", "UPDATE_TEXTS_IN_PLACE"]:
if value == "True":
value = True
else:
value = False
if name in ["PREDICTIONS_PROBABILITY"]:
value = float(value)
conn.commit()
conn.close()
return value
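# Illustrative sketch (added for clarity, not part of the original app):
# variables are stored as text in SQLite, and get_variable_value() casts them
# back based on the name, so set/get round-trips typed values.
def _variable_round_trip_example():
    set_variable(name="TABLE_LIMIT", value=50)     # persisted in the variables table
    return get_variable_value(name="TABLE_LIMIT")  # returned as int 50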
def get_difficult_texts_sql():
try:
conn = get_db_connection()
select_diff_texts_query = get_variable_value(name="SELECT_DIFF_TEXTS_QUERY")
sql_cols_list_y_classes = get_pkl(name="SQL_COLS_LIST_Y_CLASSES")
sql_table = conn.execute(select_diff_texts_query).fetchall()
total_summary = list()
for row in sql_table:
temp_row = {col: dict(row)[col] for col in sql_cols_list_y_classes}
total_summary.append(temp_row)
conn.close()
return total_summary
except:
return []
def reset_difficult_texts_sql():
try:
conn = get_db_connection()
cur = conn.cursor()
cur.execute('DELETE FROM difficultTexts')
conn.commit()
conn.close()
return None
except:
return None
def get_available_datasets():
conn = get_db_connection()
cur = conn.cursor()
cur.execute("DELETE FROM availableDatasets;")
cur.execute("INSERT INTO availableDatasets SELECT * FROM fixedDatasets;")
conn.commit()
conn.close()
dataset_name, dataset_url, date_time, y_classes, total_summary = has_save_data()
if dataset_name and date_time and y_classes and total_summary:
date_at_end_check = re.findall(r"(.*)\-[0-9]{4}\-[0-9]{2}\-[0-9]{2}\-\-[0-9]{2}\-[0-9]{2}\-[0-9]{2}", dataset_name)
if len(date_at_end_check) > 0:
dataset_name_alt = date_at_end_check[0]
else:
dataset_name_alt = dataset_name
conn = get_db_connection()
cur = conn.cursor()
cur.execute("INSERT INTO availableDatasets (name, description, url) VALUES (?, ?, ?)",
(dataset_name_alt + "-" + date_time,
"A partially labeled dataset having " + total_summary[2]["percentage"] +
" of " + total_summary[0]["number"] + " texts labeled.",
dataset_url))
conn.commit()
conn.close()
conn = get_db_connection()
available_datasets_sql = conn.execute('SELECT * FROM availableDatasets').fetchall()
conn.close()
return available_datasets_sql
def has_save_data():
try:
dataset_name = get_pkl(name="DATASET_NAME")
dataset_url = get_pkl(name="DATASET_URL")
date_time = get_pkl(name="DATE_TIME")
y_classes = get_pkl(name="Y_CLASSES")
total_summary = get_pkl(name="TOTAL_SUMMARY")
return dataset_name, dataset_url, date_time, y_classes, total_summary
except:
return None, None, None, None, None
def get_all_predictions_sql(fitted_classifier, sparse_vectorized_corpus, corpus_text_ids, texts_list,
top=5,
y_classes=["earthquake", "fire", "flood", "hurricane"],
verbose=False,
round_to=2,
format_as_percentage=False):
predictions = fitted_classifier.predict_proba(sparse_vectorized_corpus)
predictions_df = pd.DataFrame(predictions)
y_classes = [x.replace(" ", "_") for x in y_classes]
predictions_df.columns = y_classes
predictions_summary = predictions_df.replace(0.0, np.NaN).mean(axis=0)
predictions_df["id"] = corpus_text_ids
texts_list_df = pd.DataFrame(texts_list)
predictions_df = predictions_df.merge(texts_list_df, left_on="id", right_on="id")
keep_cols = ["id", "text"]
keep_cols.extend(y_classes)
predictions_df = predictions_df[keep_cols]
pred_scores = score_predictions(predictions_df[y_classes], use_entropy=True, num_labels=len(y_classes))
overall_quality = np.mean(pred_scores)
overall_quality_score_decimal_sql = overall_quality
predictions_df["pred_scores"] = pred_scores
if round_to and not format_as_percentage:
predictions_df[y_classes] = predictions_df[y_classes].round(round_to)
predictions_summary = predictions_summary.round(round_to)
overall_quality = overall_quality.round(round_to)
if format_as_percentage:
if verbose:
print(">> get_all_predictions > predictions_df.head() :")
print(predictions_df.head(top))
predictions_df[y_classes] = predictions_df[y_classes]\
.astype(float)\
.applymap(lambda x: "{0:.0%}".format(x))
# predictions_summary = (predictions_summary.astype(float) * 100).round(1).astype(str) + "%"
overall_quality = (overall_quality.astype(float) * 100).round(1).astype(str) + "%"
predictions_df = predictions_df.sort_values(["pred_scores"], ascending=[True])
if verbose:
print(">> get_all_predictions > predictions_df.head() :")
print(predictions_df.head(top))
print(">> get_all_predictions > predictions_df.tail() :")
print(predictions_df.tail(top))
keep_cols = ["id", "text"]
keep_cols.extend(y_classes)
sql_cols_list = [x + ' TEXT NOT NULL' for x in keep_cols]
sql_cols = ", ".join(sql_cols_list)
top_texts = predictions_df.head(top)[keep_cols].to_dict("records")
sql_query_1 = """
DROP TABLE IF EXISTS difficultTexts;
"""
sql_query_2 = """
CREATE TABLE difficultTexts (
""" + sql_cols + """
);
"""
conn = get_db_connection()
cur = conn.cursor()
cur.execute(sql_query_1)
conn.commit()
cur.execute(sql_query_2)
conn.commit()
parameters = ", ".join(["?"] * len(keep_cols))
query = "INSERT INTO difficultTexts (" + ", ".join(keep_cols) + ") VALUES (%s)" % parameters
for record in top_texts:
insert_values = [value for key, value in record.items()]
cur.execute(query, (insert_values))
conn.commit()
conn.close()
conn = get_db_connection()
select_diff_texts_query = "SELECT " + ", ".join(keep_cols) + " FROM difficultTexts;"
set_variable(name="SELECT_DIFF_TEXTS_QUERY", value=select_diff_texts_query)
set_pkl(name="SQL_COLS_LIST_Y_CLASSES", pkl_data=keep_cols, reset=None)
sql_table = conn.execute(select_diff_texts_query).fetchall()
texts_group_3_sql = []
for row in sql_table:
texts_group_3_sql.append({key: value for key, value in dict(row).items()})
conn.close()
update_overall_quality_scores(value=overall_quality_score_decimal_sql)
set_variable(name="OVERALL_QUALITY_SCORE", value=overall_quality)
overall_quality_score_sql = overall_quality
overall_quality_score_decimal_previous_sql = get_decimal_value(name="OVERALL_QUALITY_SCORE_DECIMAL_PREVIOUS")
return texts_group_3_sql, overall_quality_score_sql, \
overall_quality_score_decimal_sql, overall_quality_score_decimal_previous_sql
def get_top_predictions_sql(selected_class, fitted_classifier, sparse_vectorized_corpus, corpus_text_ids,
texts_list,
top=5,
cutoff_proba=0.95,
y_classes=["earthquake", "fire", "flood", "hurricane"],
verbose=False,
exclude_already_labeled=True):
predictions = fitted_classifier.predict_proba(sparse_vectorized_corpus)
predictions_df = pd.DataFrame(predictions)
predictions_df.columns = y_classes
predictions_df["id"] = corpus_text_ids
keep_cols = ["id"]
keep_cols.extend([selected_class])
predictions_df = predictions_df[keep_cols]
predictions_df = predictions_df[predictions_df[selected_class] > cutoff_proba]
predictions_df = predictions_df.sort_values([selected_class], ascending=False)
if exclude_already_labeled:
texts_list_df = pd.DataFrame.from_dict(texts_list)
predictions_df = predictions_df.merge(texts_list_df, left_on="id", right_on="id", how="left")
predictions_df = predictions_df[predictions_df["label"].isin(["-"])]
if verbose:
print(">> get_top_predictions > predictions_df :")
print(predictions_df.head(top))
filter_list = predictions_df.head(top)["id"].values
top_texts = filter_all_texts(texts_list, filter_list, exclude_already_labeled=False)
set_texts_group_x(top_texts=top_texts, table_name="group2Texts")
texts_group_3_sql = get_texts_group_x(table_name="group2Texts")
return texts_group_3_sql
def fit_classifier_sql(sparse_vectorized_corpus, corpus_text_ids, texts_list, texts_list_labeled,
y_classes=["earthquake", "fire", "flood", "hurricane"],
verbose=False,
random_state=2584,
n_jobs=-1,
labels_got_overridden_flag=False,
full_fit_if_labels_got_overridden=False):
    texts_list_labeled_df = pd.DataFrame.from_dict(texts_list_labeled)
# -*- coding: utf-8 -*-
from datetime import timedelta
import numpy as np
from pandas._libs import tslibs, lib
from pandas._libs.tslibs import Timedelta, NaT
from pandas._libs.tslibs.fields import get_timedelta_field
from pandas._libs.tslibs.timedeltas import array_to_timedelta64
from pandas import compat
from pandas.core.dtypes.common import (
_TD_DTYPE, _ensure_int64, is_timedelta64_dtype)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna
import pandas.core.common as com
from pandas.tseries.offsets import Tick, DateOffset
from pandas.tseries.frequencies import to_offset
from .datetimelike import DatetimeLikeArrayMixin
def _is_convertible_to_td(key):
return isinstance(key, (Tick, timedelta,
np.timedelta64, compat.string_types))
def _field_accessor(name, alias, docstring=None):
def f(self):
values = self.asi8
result = get_timedelta_field(values, alias)
if self.hasnans:
result = self._maybe_mask_results(result, convert='float64')
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
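# Illustrative sketch of the property-factory pattern used above (generic
# Python added for clarity, not part of the pandas source): a closure captures
# the field name and the factory returns a read-only property.
def _field_accessor_demo(name):
    def f(self):
        return self._data[name]
    f.__name__ = name
    return property(f)
class _FieldAccessorExample(object):
    days = _field_accessor_demo("days")
    def __init__(self, data):
        self._data = data
# _FieldAccessorExample({"days": 3}).days  ->  3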
class TimedeltaArrayMixin(DatetimeLikeArrayMixin):
@property
def _box_func(self):
return lambda x: Timedelta(x, unit='ns')
@property
def dtype(self):
return _TD_DTYPE
# ----------------------------------------------------------------
# Constructors
_attributes = ["freq"]
@classmethod
def _simple_new(cls, values, freq=None, **kwargs):
values = np.array(values, copy=False)
if values.dtype == np.object_:
values = array_to_timedelta64(values)
if values.dtype != _TD_DTYPE:
if is_timedelta64_dtype(values):
# non-nano unit
values = values.astype(_TD_DTYPE)
else:
values = _ensure_int64(values).view(_TD_DTYPE)
result = object.__new__(cls)
result._data = values
result._freq = freq
return result
def __new__(cls, values, freq=None, start=None, end=None, periods=None,
closed=None):
if (freq is not None and not isinstance(freq, DateOffset) and
freq != 'infer'):
freq = to_offset(freq)
if periods is not None:
if lib.is_float(periods):
periods = int(periods)
elif not lib.is_integer(periods):
raise TypeError('`periods` must be a number, got {periods}'
.format(periods=periods))
if values is None:
if freq is None and com._any_none(periods, start, end):
raise ValueError('Must provide freq argument if no data is '
'supplied')
else:
return cls._generate(start, end, periods, freq,
closed=closed)
result = cls._simple_new(values, freq=freq)
if freq == 'infer':
inferred = result.inferred_freq
if inferred:
result._freq = to_offset(inferred)
return result
@classmethod
def _generate(cls, start, end, periods, freq, closed=None, **kwargs):
# **kwargs are for compat with TimedeltaIndex, which includes `name`
if com._count_not_none(start, end, periods, freq) != 3:
raise ValueError('Of the four parameters: start, end, periods, '
'and freq, exactly three must be specified')
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
if freq is not None:
index = _generate_regular_range(start, end, periods, freq)
index = cls._simple_new(index, freq=freq, **kwargs)
else:
index = np.linspace(start.value, end.value, periods).astype('i8')
# TODO: shouldn't we pass `name` here? (via **kwargs)
index = cls._simple_new(index, freq=freq)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
# ----------------------------------------------------------------
# Arithmetic Methods
def _add_offset(self, other):
assert not isinstance(other, Tick)
raise TypeError("cannot add the type {typ} to a {cls}"
.format(typ=type(other).__name__,
cls=type(self).__name__))
def _sub_datelike(self, other):
assert other is not NaT
raise TypeError("cannot subtract a datelike from a {cls}"
.format(cls=type(self).__name__))
def _add_delta(self, delta):
"""
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self.
Parameters
----------
delta : timedelta, np.timedelta64, Tick, TimedeltaArray, TimedeltaIndex
Returns
-------
result : same type as self
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__)
"""
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
elif isinstance(delta, TimedeltaArrayMixin):
new_values = self._add_delta_tdi(delta)
elif is_timedelta64_dtype(delta):
# ndarray[timedelta64] --> wrap in TimedeltaArray/Index
delta = type(self)(delta)
new_values = self._add_delta_tdi(delta)
else:
raise TypeError("cannot add the type {0} to a TimedeltaIndex"
.format(type(delta)))
return type(self)(new_values, freq='infer')
def _evaluate_with_timedelta_like(self, other, op):
if isinstance(other, ABCSeries):
# GH#19042
return NotImplemented
opstr = '__{opname}__'.format(opname=op.__name__).replace('__r', '__')
# allow division by a timedelta
if opstr in ['__div__', '__truediv__', '__floordiv__']:
if _is_convertible_to_td(other):
other = Timedelta(other)
if isna(other):
raise NotImplementedError(
"division by pd.NaT not implemented")
i8 = self.asi8
left, right = i8, other.value
if opstr in ['__floordiv__']:
result = op(left, right)
else:
result = op(left, np.float64(right))
result = self._maybe_mask_results(result, convert='float64')
return result
return NotImplemented
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timedelta methods
def total_seconds(self):
"""
Return total duration of each element expressed in seconds.
This method is available directly on TimedeltaArray, TimedeltaIndex
and on Series containing timedelta values under the ``.dt`` namespace.
Returns
-------
seconds : [ndarray, Float64Index, Series]
When the calling object is a TimedeltaArray, the return type
is ndarray. When the calling object is a TimedeltaIndex,
the return type is a Float64Index. When the calling object
is a Series, the return type is Series of type `float64` whose
index is the same as the original.
See Also
--------
datetime.timedelta.total_seconds : Standard library version
of this method.
TimedeltaIndex.components : Return a DataFrame with components of
each Timedelta.
Examples
--------
**Series**
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
>>> s
0 0 days
1 1 days
2 2 days
3 3 days
4 4 days
dtype: timedelta64[ns]
>>> s.dt.total_seconds()
0 0.0
1 86400.0
2 172800.0
3 259200.0
4 345600.0
dtype: float64
**TimedeltaIndex**
>>> idx = pd.to_timedelta(np.arange(5), unit='d')
>>> idx
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
>>> idx.total_seconds()
Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],
dtype='float64')
"""
return self._maybe_mask_results(1e-9 * self.asi8)
def to_pytimedelta(self):
"""
Return Timedelta Array/Index as object ndarray of datetime.timedelta
objects
Returns
-------
datetimes : ndarray
"""
return tslibs.ints_to_pytimedelta(self.asi8)
days = _field_accessor("days", "days",
" Number of days for each element. ")
seconds = _field_accessor("seconds", "seconds",
" Number of seconds (>= 0 and less than 1 day) "
"for each element. ")
microseconds = _field_accessor("microseconds", "microseconds",
"\nNumber of microseconds (>= 0 and less "
"than 1 second) for each\nelement. ")
nanoseconds = _field_accessor("nanoseconds", "nanoseconds",
"\nNumber of nanoseconds (>= 0 and less "
"than 1 microsecond) for each\nelement.\n")
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self.hasnans
if hasnans:
def f(x):
                if isna(x):
                    return [np.nan] * len(columns)
                return x.components
# -*- coding: utf-8 -*-
'''The plotting module.
Contents:
show_template
show
piechart
histogram
scatter
timeseries
dendrogram
feature_importances
'''
import pandas as pd
import numpy as np
from bokeh.models import (ColumnDataSource, HoverTool,
Slider, RangeSlider, CheckboxGroup, DateRangeSlider,
Range1d, CDSView, Plot, MultiLine,
Circle, TapTool, BoxZoomTool, ResetTool, SaveTool)
from bokeh.models.widgets import DataTable, TableColumn, Dropdown
from bokeh.models.graphs import from_networkx, NodesAndLinkedEdges
from bokeh.layouts import column, row
import bokeh.layouts as layouts
from bokeh.plotting import figure
from bokeh.io import output_notebook
from bokeh.io.export import get_screenshot_as_png
import bokeh.io as io
from math import pi
from bokeh.palettes import Category20, Spectral4
from henchman.learning import _raw_feature_importances
from henchman.learning import create_model
from sklearn.metrics import (roc_auc_score, precision_score,
recall_score, f1_score, roc_curve)
import networkx as nx
def show_template():
'''Prints a template for `show`. See :func:`show <henchman.plotting.show>` for details.
Example:
>>> import henchman.plotting as hplot
>>> hplot.show_template()
'''
print('show(plot,\n'
' static=False,\n'
' png=False,\n'
' hover=False,\n'
' colors=None,\n'
' width=None,\n'
' height=None,\n'
' title=\'Temporary title\',\n'
' x_axis=\'my xaxis name\',\n'
' y_axis=\'my yaxis name\',\n'
' x_range=(0, 10) or None,\n'
' y_range=(0, 10) or None)\n')
return None
def _modify_plot(plot, figargs):
'''Add text and modify figure attributes. This is an internal
function which allows for figure attributes to be passed into
interactive functions.
Args:
plot (bokeh.figure): The figure to modify
figargs (dict[assorted]): A dictionary of width, height,
title, x_axis, y_axis, x_range and y_range.
'''
if figargs['width'] is not None:
plot.width = figargs['width']
if figargs['height'] is not None:
plot.height = figargs['height']
if figargs['title'] is not None:
plot.title.text = figargs['title']
if figargs['x_axis'] is not None:
plot.xaxis.axis_label = figargs['x_axis']
if figargs['y_axis'] is not None:
plot.yaxis.axis_label = figargs['y_axis']
if figargs['x_range'] is not None:
plot.x_range = Range1d(figargs['x_range'][0], figargs['x_range'][1])
if figargs['y_range'] is not None:
plot.y_range = Range1d(figargs['y_range'][0], figargs['y_range'][1])
return plot
def show(plot, png=False, static=False, hover=True,
width=None, height=None,
title=None, x_axis=None, y_axis=None,
x_range=None, y_range=None, colors=None,
fig=False):
'''Format and show a bokeh plot.
This is a wrapper around bokeh show which can add common
plot attributes like height, axis labels and whether or not
you would like the output as a png. This function also runs
the bokeh function ``output_notebook()`` to start.
You can get a full list of options by function with ``show_template()``.
Args:
plot (function): The plot to show.
static (bool): If True, show a static bokeh plot.
hover (bool): If True, show the hovertool. Default is True.
width (int, optional): Plot width.
height (int, optional): Plot height.
title (str, optional): The title for the plot.
x_axis (str, optional): The x_axis label.
y_axis (str, optional): The y_axis label.
x_range (tuple[int, int], optional): A min and max x value to plot.
y_range (tuple[int, int], optional): A min and max y value to plot.
colors (list[str], optional): A list of colors to use for the plot.
png (bool): If True, return a png of the plot. Default is False
fig (bool, advanced): If True, return a bokeh figure instead of
showing the plot. Only use if you want to manipulate the bokeh figure directly.
Example:
>>> import henchman.plotting as hplot
>>> hplot.show_template()
show(plot,
static=False,
png=False,
hover=True,
width=None,
height=None,
title='Temporary title',
x_axis='my xaxis name',
y_axis='my yaxis name',
x_range=(0, 10) or None,
y_range=(0, 10) or None,
colors=None)
>>> hplot.show(plot, width=500, title='My Plot Title')
>>> hplot.show(plot, png=True, static=True)
'''
output_notebook(hide_banner=True)
figargs = {'static': static, 'png': png, 'hover': hover,
'width': width, 'height': height,
'title': title, 'x_axis': x_axis, 'y_axis': y_axis,
'x_range': x_range, 'y_range': y_range,
'colors': colors}
figure = plot(figargs=figargs)
if fig:
return figure
if png:
figargs['static'] = True
return get_screenshot_as_png(plot(figargs=figargs), driver=None)
return io.show(figure)
def gridplot(plots, n_cols=1):
'''Create a gridplot.
This is a wrapper around bokeh gridplot meant to easily work with
henchman plots. Note that the figures must be ``static`` for this to work.
This function call is a work in progress and will likely be depreciated in
favor of something stable.
Args:
plots (list[bokeh.figure]): The plots to show. Either a list or a list of lists.
n_cols (int): The number of columns. This will be ignored if a list of lists is passed in.
Example:
>>> import henchman.plotting as hplot
>>> p1 = hplot.show(plot, static=True, fig=True)
>>> p2 = hplot.show(plot, static=True, fig=True)
>>> hplot.gridplot([p1, p2], n_cols=2)
'''
output_notebook(hide_banner=True)
if isinstance(plots[0], list):
return io.show(layouts.gridplot(plots))
return io.show(layouts.gridplot(plots, ncols=n_cols))
def piechart(col, sort=True, mergepast=None, drop_n=None, figargs=None):
'''Creates a piechart.
Finds all of the unique values in a column and makes a piechart
out of them. By default, this will make a dynamic piechart with
sliders for the different parameters.
Args:
col (pd.Series): The column from which to make the piechart.
sort (bool): Whether or not to sort by frequency for static plot. Default is True.
mergepast (int): Merge infrequent column values for static plot. Default is 10.
drop_n (int): How many high frequency values to drop for static plot. Default is None.
Example:
If the dataframe ``X`` has a column named ``car_color``:
>>> import henchman.plotting as hplot
>>> plot = hplot.piechart(X['car_color'])
>>> hplot.show(plot)
For a static plot:
>>> import henchman.plotting as hplot
>>> plot = hplot.piechart(X['car_color'], sort=False)
>>> hplot.show(plot, static=True)
'''
if figargs is None:
return lambda figargs: piechart(col, sort, mergepast, drop_n, figargs)
source = ColumnDataSource(_make_piechart_source(col, mergepast, sort, drop_n, figargs))
plot = _make_piechart_plot(source, figargs)
plot = _modify_plot(plot, figargs)
if figargs['static']:
return plot
def modify_doc(doc, col, sort, mergepast, drop_n, figargs):
def callback(attr, old, new):
try:
source.data = ColumnDataSource(
_make_piechart_source(col,
sort=sorted_button.active,
mergepast=merge_slider.value,
drop_n=drop_slider.value,
figargs=figargs)).data
except Exception as e:
print(e)
sorted_button, merge_slider, drop_slider = _piechart_widgets(
col, sort, mergepast, drop_n, callback)
doc.add_root(
column(row(column(merge_slider, drop_slider), sorted_button), plot))
return lambda doc: modify_doc(doc, col, sort, mergepast, drop_n, figargs)
def histogram(col, y=None, n_bins=10, col_max=None, col_min=None,
normalized=False, figargs=None):
'''Creates a histogram.
This function takes a single input and creates a histogram from it.
There is an optional second column input for labels, if you would
like to see how a label is distributed relative to your numeric
variable.
Args:
col (pd.Series): The column from which to make a histogram.
y (pd.Series, optional): A binary label that you would like to track.
n_bins (int): The number of bins of the histogram. Default is 10.
col_max (float): Maximum value to include in histogram.
col_min (float): Minimum value to include in histogram.
normalized (bool): Whether or not to normalize the columns. Default is False.
Example:
If the dataframe ``X`` has a column named ``amount`` and
a label ``y``, you can compare them with
>>> import henchman.plotting as hplot
>>> plot1 = hplot.histogram(X['amount'], y, normalized=False)
>>> hplot.show(plot1)
If you wanted a single variable histogram instead, omit y:
>>> plot2 = hplot.histogram(X['amount'], col_max=200, n_bins=20)
>>> hplot.show(plot2)
'''
if figargs is None:
return lambda figargs: histogram(
col, y, n_bins, col_max, col_min,
normalized, figargs=figargs)
source = ColumnDataSource(_make_histogram_source(col, y, n_bins, col_max, col_min, normalized))
plot = _make_histogram_plot(y, source, figargs)
plot = _modify_plot(plot, figargs)
if figargs['static']:
return plot
def modify_doc(doc, col, y, n_bins, col_max, col_min, normalized, figargs):
def callback(attr, old, new):
try:
source.data = ColumnDataSource(_make_histogram_source(
col, y, n_bins=slider.value, col_max=range_select.value[1],
col_min=range_select.value[0], normalized=normalized)).data
except Exception as e:
print(e)
slider, range_select = _histogram_widgets(col, y, n_bins, col_max, col_min, callback)
doc.add_root(column(slider, range_select, plot))
return lambda doc: modify_doc(doc, col, y, n_bins, col_max, col_min, normalized, figargs)
def timeseries(col_1, col_2, col_max=None, col_min=None, n_bins=10,
aggregate='mean', figargs=None):
    '''Creates time-based aggregations of a numeric variable.
    This function lets the user take the mean, sum, count, min
    or max of a second variable within bins of a time series.
Args:
col_1 (pd.Series): The column from which to create bins. Must be a datetime.
col_2 (pd.Series): The column to aggregate.
col_max (pd.datetime): The maximum value for the x-axis. Default is None.
col_min (pd.datetime): The minimum value for the x-axis. Default is None.
n_bins (int): The number of time bins to make.
aggregate (str): What aggregation to do on the numeric column. Options are
'mean', 'sum', 'count', 'max' and 'min'. Default is 'mean'.
Example:
If the dataframe ``X`` has a columns named ``amount`` and ``date``.
>>> import henchman.plotting as hplot
>>> plot = hplot.timeseries(X['date'], X['amount'])
>>> hplot.show(plot)
For a bokeh plot without sliders:
>>> plot2 = hplot.timeseries(X['date'], X['amount'], n_bins=50)
>>> hplot.show(plot2, static=True)
'''
if figargs is None:
return lambda figargs: timeseries(col_1, col_2, col_max, col_min,
n_bins, aggregate, figargs=figargs)
source = ColumnDataSource(_make_timeseries_source(col_1, col_2, col_max,
col_min, n_bins, aggregate))
plot = _make_timeseries_plot(source, figargs)
plot = _modify_plot(plot, figargs)
if figargs['static']:
return plot
def modify_doc(doc, col_1, col_2, col_max, col_min, n_bins, aggregate, figargs):
def callback(attr, old, new):
try:
source.data = ColumnDataSource(
_make_timeseries_source(col_1, col_2,
col_max=range_select.value_as_datetime[1],
col_min=range_select.value_as_datetime[0],
n_bins=slider.value,
aggregate=dropdown.value)).data
dropdown.label = dropdown.value
except Exception as e:
print(e)
slider, range_select, dropdown = _timeseries_widgets(
col_1, col_2, col_max, col_min, n_bins, aggregate, callback)
doc.add_root(column(slider, range_select, dropdown, plot))
return lambda doc: modify_doc(
doc, col_1, col_2, col_max, col_min, n_bins, aggregate, figargs)
def scatter(col_1, col_2, cat=None, label=None, aggregate='last',
figargs=None):
'''Creates a scatter plot of two variables.
This function allows for the display of two variables with
    an optional argument to group by. By default, this lets the
    user see what the two variables look like when grouped by
    another. A standard example would be to look at
the "last" row for a column that's changing over time.
Args:
col_1 (pd.Series): The x-values of the plotted points.
col_2 (pd.Series): The y-values of the plotted points.
cat (pd.Series, optional): A categorical variable to aggregate by.
label (pd.Series, optional): A numeric label to be used in the hovertool.
aggregate (str): The aggregation to use. Options are 'mean', 'last', 'sum', 'max' and 'min'.
Example:
If the dataframe ``X`` has a columns named ``amount`` and ``quantity``.
>>> import henchman.plotting as hplot
>>> plot = hplot.scatter(X['amount'], X['quantity'])
>>> hplot.show(plot)
If you would like to see the amount, quantity pair as aggregated by the ``month`` column:
>>> plot2 = hplot.scatter(X['date'], X['amount'], cat=X['month'], aggregate='mean')
>>> hplot.show(plot2)
'''
if figargs is None:
return lambda figargs: scatter(
col_1, col_2, cat, label, aggregate, figargs=figargs)
source = ColumnDataSource(_make_scatter_source(col_1, col_2, cat, label, aggregate))
plot = _make_scatter_plot(col_1, col_2, label, cat, source, figargs)
plot = _modify_plot(plot, figargs)
if figargs['static']:
return plot
def modify_doc(doc, col_1, col_2, cat, label, aggregate, figargs):
def callback(attr, old, new):
try:
source.data = ColumnDataSource(
_make_scatter_source(col_1, col_2, cat, label, aggregate=dropdown.value)).data
dropdown.label = dropdown.value
except Exception as e:
print(e)
dropdown = _scatter_widgets(col_1, col_2, aggregate, callback)
if cat is not None:
doc.add_root(column(dropdown, plot))
else:
doc.add_root(plot)
return lambda doc: modify_doc(doc, col_1, col_2, cat, label, aggregate, figargs)
def feature_importances(X, model, n_feats=5, figargs=None):
'''Plot feature importances.
Args:
X (pd.DataFrame): A dataframe with which you have trained.
model: Any fit model with a ``feature_importances_`` attribute.
n_feats (int): The number of features to plot.
Example:
>>> import henchman.plotting as hplot
>>> plot = hplot.feature_importances(X, model, n_feats=10)
>>> hplot.show(plot)
'''
if figargs is None:
return lambda figargs: feature_importances(X, model, n_feats, figargs=figargs)
feature_imps = _raw_feature_importances(X, model)
features = [f[1] for f in feature_imps[0:n_feats]][::-1]
importances = [f[0] for f in feature_imps[0:n_feats]][::-1]
output_notebook()
source = ColumnDataSource(data={'feature': features,
'importance': importances})
plot = figure(y_range=features,
height=500,
title="Random Forest Feature Importances")
plot.hbar(y='feature',
right='importance',
height=.8,
left=0,
source=source,
color="#008891")
plot.toolbar_location = None
plot.yaxis.major_label_text_font_size = '10pt'
plot = _modify_plot(plot, figargs)
return plot
def roc_auc(X, y, model, pos_label=1, prob_col=1, n_splits=1, figargs=None):
    '''Plots the receiver operating characteristic curve.
This function creates a fit model and shows the results of the roc curve.
Args:
X (pd.DataFrame): The dataframe on which to create a model.
y (pd.Series): The labels for which to create a model.
pos_label (int): Which label to check for fpr and tpr. Default is 1.
prob_col (int): The columns of the probs dataframe to use.
n_splits (int): The number of splits to use in validation.
Example:
If the dataframe ``X`` has a binary classification label y:
>>> import henchman.plotting as hplot
>>> from sklearn.ensemble import RandomForestClassifier
>>> plot = hplot.roc_auc(X, y, RandomForestClassifier())
>>> hplot.show(plot)
'''
if figargs is None:
return lambda figargs: roc_auc(X, y, model, pos_label,
prob_col, n_splits, figargs=figargs)
(scores, model), df_list = create_model(
X, y, model, roc_auc_score, _return_df=True, n_splits=n_splits)
probs = model.predict_proba(df_list[1])
fpr, tpr, thresholds = roc_curve(df_list[3],
probs[:, prob_col],
pos_label=pos_label)
tools = ['box_zoom', 'save', 'reset']
plot = figure(tools=tools)
plot.line(x=fpr, y=tpr)
plot.title.text = 'Receiver operating characteristic'
plot.xaxis.axis_label = 'False Positive Rate'
plot.yaxis.axis_label = 'True Positive Rate'
plot.line(x=fpr, y=fpr, color='red', line_dash='dashed')
plot = _modify_plot(plot, figargs)
return(plot)
def dendrogram(D, figargs=None):
'''Creates a dendrogram plot.
    This plot can show the full structure of a given dendrogram.
Args:
D (henchman.selection.Dendrogram): An initialized dendrogram object
Examples:
>>> from henchman.selection import Dendrogram
>>> from henchman.plotting import show
>>> import henchman.plotting as hplot
>>> D = Dendrogram(X)
>>> plot = hplot.dendrogram(D)
>>> show(plot)
'''
if figargs is None:
return lambda figargs: dendrogram(D, figargs=figargs)
G = nx.Graph()
vertices_source = ColumnDataSource(
pd.DataFrame({'index': D.columns.keys(),
'desc': list(D.columns.values())}))
edges_source = ColumnDataSource(
pd.DataFrame(D.edges[0]).rename(
columns={1: 'end', 0: 'start'}))
step_source = ColumnDataSource(
pd.DataFrame({'step': [0],
'thresh': [D.threshlist[0]],
'components': [len(D.graphs[0])]}))
G.add_nodes_from([str(x) for x in vertices_source.data['index']])
G.add_edges_from(zip(
[str(x) for x in edges_source.data['start']],
[str(x) for x in edges_source.data['end']]))
graph_renderer = from_networkx(G, nx.circular_layout,
scale=1, center=(0, 0))
graph_renderer.node_renderer.data_source = vertices_source
graph_renderer.node_renderer.view = CDSView(source=vertices_source)
graph_renderer.edge_renderer.data_source = edges_source
graph_renderer.edge_renderer.view = CDSView(source=edges_source)
plot = Plot(plot_width=400, plot_height=400,
x_range=Range1d(-1.1, 1.1),
y_range=Range1d(-1.1, 1.1))
plot.title.text = "Feature Connectivity"
graph_renderer.node_renderer.glyph = Circle(
size=5, fill_color=Spectral4[0])
graph_renderer.node_renderer.selection_glyph = Circle(
size=15, fill_color=Spectral4[2])
graph_renderer.edge_renderer.data_source = edges_source
graph_renderer.edge_renderer.glyph = MultiLine(line_color="#CCCCCC",
line_alpha=0.6,
line_width=.5)
graph_renderer.edge_renderer.selection_glyph = MultiLine(
line_color=Spectral4[2],
line_width=3)
graph_renderer.node_renderer.hover_glyph = Circle(
size=5,
fill_color=Spectral4[1])
graph_renderer.selection_policy = NodesAndLinkedEdges()
graph_renderer.inspection_policy = NodesAndLinkedEdges()
plot.renderers.append(graph_renderer)
plot.add_tools(
HoverTool(tooltips=[("feature", "@desc"),
("index", "@index"), ]),
TapTool(),
BoxZoomTool(),
SaveTool(),
ResetTool())
plot = _modify_plot(plot, figargs)
if figargs['static']:
return plot
def modify_doc(doc, D, figargs):
data_table = DataTable(source=step_source,
columns=[TableColumn(field='step',
title='Step'),
TableColumn(field='thresh',
title='Thresh'),
TableColumn(field='components',
title='Components')],
height=50, width=400)
def callback(attr, old, new):
try:
edges = D.edges[slider.value]
edges_source.data = ColumnDataSource(
pd.DataFrame(edges).rename(columns={1: 'end',
0: 'start'})).data
step_source.data = ColumnDataSource(
{'step': [slider.value],
'thresh': [D.threshlist[slider.value]],
'components': [len(D.graphs[slider.value])]}).data
except Exception as e:
print(e)
slider = Slider(start=0,
end=(len(D.edges) - 1),
value=0,
step=1,
title="Step")
slider.on_change('value', callback)
doc.add_root(column(slider, data_table, plot))
return lambda doc: modify_doc(doc, D, figargs)
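# A hedged sketch of consuming dendrogram's non-static return value. When
# figargs['static'] is falsy the function above returns a modify_doc-style
# callable; in a Jupyter notebook such a callable can be passed straight to
# bokeh.io.show as an embedded app. This assumes a notebook session and that
# figargs carries whatever keys _modify_plot expects besides 'static'.
def _example_interactive_dendrogram(D, figargs):
    from bokeh.io import show as bokeh_show
    app = dendrogram(D, figargs=dict(figargs, static=False))
    bokeh_show(app)  # Bokeh serves the slider/table/plot app inline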
def f1(X, y, model, n_precs=1000, n_splits=1, figargs=None):
'''Plots precision, recall and f1 at various thresholds.
This function fits the given model and plots precision, recall
and f1 scores across a grid of probability thresholds.
Args:
X (pd.DataFrame): The dataframe on which to create a model.
y (pd.Series): The labels for which to create a model.
model: A sklearn-style classifier with ``fit`` and ``predict_proba`` methods.
n_precs (int): The number of thresholds to sample between 0 and 1.
n_splits (int): The number of splits to use in validation.
Example:
If the dataframe ``X`` has a binary classification label ``y``:
>>> import henchman.plotting as hplot
>>> from sklearn.ensemble import RandomForestClassifier
>>> plot = hplot.f1(X, y, RandomForestClassifier())
>>> hplot.show(plot)
'''
if figargs is None:
return lambda figargs: f1(X, y, model, n_precs,
n_splits, figargs=figargs)
(scores, model), df_list = create_model(
X, y, model, roc_auc_score, _return_df=True, n_splits=n_splits)
probs = model.predict_proba(df_list[1])
threshes = [x/float(n_precs) for x in range(0, n_precs)]
precisions = [precision_score(df_list[3], probs[:, 1] > t) for t in threshes]
recalls = [recall_score(df_list[3], probs[:, 1] > t) for t in threshes]
fones = [f1_score(df_list[3], probs[:, 1] > t) for t in threshes]
tools = ['box_zoom', 'save', 'reset']
plot = figure(tools=tools)
plot.line(x=threshes, y=precisions, color='green', legend='precision')
plot.line(x=threshes, y=recalls, color='blue', legend='recall')
plot.line(x=threshes, y=fones, color='red', legend='f1')
plot.xaxis.axis_label = 'Threshold'
plot.title.text = 'Precision, Recall, and F1 by Threshold'
plot = _modify_plot(plot, figargs)
return(plot)
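# A small illustration of the thresholding inside f1() above: each curve point
# is just the sklearn metric computed on the binarized probabilities
# ``probs > t``. Toy numbers only; precision_score and friends are already
# imported at the top of this module, as f1() uses them.
def _example_threshold_metrics(t=0.5):
    y_true = np.array([0, 0, 1, 1])
    probs = np.array([0.2, 0.6, 0.4, 0.9])
    preds = probs > t
    return (precision_score(y_true, preds),
            recall_score(y_true, preds),
            f1_score(y_true, preds))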
# Piechart Utilities #
def _make_piechart_source(col, mergepast=None, sort=True, drop_n=None, figargs=None):
if mergepast is None:
mergepast = col.nunique()
values = col.reset_index().groupby(col.name).count()
total = float(col.shape[0])
counts = values[values.columns[0]].tolist()
percents = [x / total for x in counts]
tmp = pd.DataFrame({'names': values.index,
'counts': counts,
'percents': percents})
if sort:
tmp = tmp.sort_values(by='counts', ascending=False)
if drop_n:
tmp = tmp.iloc[drop_n:]
tmp['percents'] = tmp['percents']/tmp['percents'].sum()
starts = []
ends = []
loc = 0
for perc in tmp['percents']:
starts.append(loc)
loc += 2*pi*perc
ends.append(loc)
tmp['starts'] = starts
tmp['ends'] = ends
if mergepast < tmp.shape[0]:
percent = tmp.iloc[mergepast:]['percents'].sum()
count = tmp.iloc[mergepast:]['counts'].sum()
start = tmp.iloc[mergepast:mergepast+1]['starts'].values
end = tmp.iloc[-1:]['ends'].values
tmp = pd.concat([tmp.iloc[:mergepast],
pd.DataFrame({'names': ['Other'],
'counts': [count],
'percents': [percent],
'starts': start,
'ends': end})])
if figargs['colors'] is None:
figargs['colors'] = Category20[20]
tmp['colors'] = [figargs['colors'][i % (len(figargs['colors'])-1)]
for i, _ in enumerate(tmp['names'])]
return tmp
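# A hedged usage sketch for _make_piechart_source (toy Series; figargs only
# needs the 'colors' key for this helper). The returned frame has one row per
# wedge, with 'starts'/'ends' angles covering 0..2*pi and a 'colors' column.
def _example_piechart_source():
    col = pd.Series(['a', 'a', 'b', 'c'], name='letter')
    return _make_piechart_source(col, mergepast=None, sort=True, drop_n=None,
                                 figargs={'colors': None})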
def _make_piechart_plot(source, figargs):
tools = ['box_zoom', 'save', 'reset']
if figargs['hover']:
hover = HoverTool(
tooltips=[
("Name", " @names"),
("Count", " @counts"),
("Percent", " @percents{0%}"),
],
mode='mouse')
tools = tools + [hover]
plot = figure(height=500, tools=tools, toolbar_location='above')
plot.wedge(x=0, y=0,
radius=0.3,
start_angle='starts',
end_angle='ends',
line_color='white',
color='colors',
legend='names',
source=source)
plot.axis.axis_label = None
plot.axis.visible = False
plot.grid.grid_line_color = None
return plot
def _piechart_widgets(col, sort, mergepast, drop_n, callback):
if sort:
active = [0]
else:
active = []
sorted_button = CheckboxGroup(
labels=["Sorted"], active=active)
sorted_button.on_change('active', callback)
merge_slider = Slider(start=1, end=col.nunique(),
value=mergepast or col.nunique(), step=1,
title="Merge Slider")
merge_slider.on_change('value', callback)
drop_slider = Slider(start=0, end=col.nunique(),
value=drop_n or 0, step=1,
title="Drop Slider")
drop_slider.on_change('value', callback)
return sorted_button, merge_slider, drop_slider
# Timeseries Utilities #
def _make_timeseries_source(col_1, col_2, col_max=None, col_min=None, n_bins=10, aggregate='mean'):
col_1_time = pd.to_datetime(col_1)
if col_max is None:
col_max = col_1_time.max()
if col_min is None:
col_min = col_1_time.min()
truncated = col_1_time[(col_1_time <= col_max) & (col_1_time >= col_min)]
tmp = pd.DataFrame({col_1.name: truncated,
'height': col_2,
'splits': pd.cut(pd.to_numeric(truncated), n_bins, right=False)})
tmp = tmp.groupby('splits')['height'].aggregate(aggregate).reset_index()
tmp['left'] = list(tmp['splits'].apply(lambda x: pd.to_datetime(x.left)))
tmp['right'] = list(tmp['splits'].apply(lambda x: pd.to_datetime(x.right)))
tmp = tmp[['left', 'right', 'height']]
return tmp
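# A hedged usage sketch for _make_timeseries_source: bin a datetime column and
# aggregate a numeric column within each bin (toy data only).
def _example_timeseries_source():
    col_1 = pd.Series(pd.date_range('2020-01-01', periods=6, freq='D'), name='when')
    col_2 = pd.Series([1, 2, 3, 4, 5, 6], name='value')
    # Three bins; the result has 'left', 'right' and mean 'height' per bin.
    return _make_timeseries_source(col_1, col_2, n_bins=3, aggregate='mean')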
def _make_timeseries_plot(source, figargs):
tools = ['box_zoom', 'save', 'reset']
if figargs['hover']:
hover = HoverTool(
tooltips=[
("Height", " @height"),
("Bin", " [@left{%R %F}, @right{%R %F})")
],
formatters={
'left': 'datetime',
'right': 'datetime'
},
mode='mouse')
tools += [hover]
plot = figure(tools=tools, x_axis_type='datetime')
if figargs['colors'] is None:
plot_color = '#1F77B4'
line_color = 'white'
else:
assert len(figargs['colors']) >= 2
plot_color = figargs['colors'][0]
line_color = figargs['colors'][1]
plot.quad(top='height', bottom=0,
left='left', right='right', color=plot_color,
line_color=line_color, source=source, fill_alpha=.5)
return plot
def _timeseries_widgets(col_1, col_2, col_max, col_min, n_bins, aggregate, callback):
col_1_time = pd.to_datetime(col_1)
if col_max is None:
col_max = col_1_time.max()
if col_min is None:
col_min = col_1_time.min()
slider = Slider(start=1, end=100,
value=n_bins, step=1,
title="Bins")
slider.on_change('value', callback)
range_select = DateRangeSlider(start=col_1_time.min(),
end=col_1_time.max(),
value=(col_min,
col_max),
step=1, title='Range', format='%R %F')
range_select.on_change('value', callback)
dropdown = Dropdown(value=aggregate, label=aggregate,
button_type="default",
menu=[('mean', 'mean'),
('count', 'count'),
('sum', 'sum'),
('max', 'max'),
('min', 'min')])
dropdown.on_change('value', callback)
return slider, range_select, dropdown
# Histogram Utilities #
def _make_histogram_source(col, y, n_bins, col_max, col_min, normalized):
if col_max is None:
col_max = col.max()
if col_min is None:
col_min = col.min()
truncated = col[(col <= col_max) & (col >= col_min)]
hist, edges = np.histogram(truncated, bins=n_bins, density=normalized)
if normalized:
hist = [height * (edges[1] - edges[0]) for height in hist]
cols = pd.DataFrame({'col': col, 'label': y})
tmp = pd.DataFrame({'hist': hist,
'left': edges[:-1],
'right': edges[1:]})
if y is not None:
label_hist = np.nan_to_num(cols['label'].groupby(
pd.cut(col, edges, right=False)).sum().values)
if normalized:
label_hist = label_hist / (label_hist.sum())
tmp['label'] = label_hist
return tmp
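# A hedged usage sketch for _make_histogram_source with a numeric column and an
# optional binary label (toy data only). With y given, the result also carries
# per-bin label sums in a 'label' column.
def _example_histogram_source():
    col = pd.Series([1.0, 2.0, 2.5, 3.0, 4.0], name='feature')
    y = pd.Series([0, 1, 0, 1, 1], name='label')
    return _make_histogram_source(col, y, n_bins=4, col_max=None, col_min=None,
                                  normalized=False)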
def _make_histogram_plot(y, source, figargs):
tools = ['box_zoom', 'save', 'reset']
if figargs['hover']:
if y is not None:
hover = HoverTool(
tooltips=[
("Height", " @hist"),
("Label", " @label"),
("Bin", " [@left{0.00}, @right{0.00})"),
],
mode='mouse')
else:
hover = HoverTool(
tooltips=[
("Height", " @hist"),
("Bin", " [@left{0.00}, @right{0.00})"),
],
mode='mouse')
tools += [hover]
if figargs['colors'] is None:
plot_1_color = '#1F77B4'
plot_2_color = 'purple'
line_color = 'white'
else:
assert len(figargs['colors']) >= 3
plot_1_color = figargs['colors'][0]
plot_2_color = figargs['colors'][1]
line_color = figargs['colors'][2]
plot = figure(tools=tools)
plot.quad(top='hist', bottom=0, left='left',
right='right', color=plot_1_color, line_color=line_color,
source=source, fill_alpha=.5)
if y is not None:
plot.quad(top='label', bottom=0, left='left',
right='right', color=plot_2_color,
line_color=line_color, source=source, fill_alpha=.5)
return plot
def _histogram_widgets(col, y, n_bins, col_max, col_min, callback):
if col_max is None:
col_max = col.max()
if col_min is None:
col_min = col.min()
slider = Slider(start=1, end=100, value=n_bins, step=1, title="Bins")
slider.on_change('value', callback)
range_select = RangeSlider(start=col.min(),
end=col.max(),
value=(col_min, col_max),
step=5, title='Histogram Range')
range_select.on_change('value', callback)
return slider, range_select
# Scatter Utilities #
def _make_scatter_source(col_1, col_2, cat=None, label=None, aggregate='last'):
tmp = | pd.DataFrame({'col_1': col_1, 'col_2': col_2}) | pandas.DataFrame |
import unittest
import unittest.mock as mock
import asyncio
import warnings
from fplpandas import FPLPandas
import logging as log
import pandas as pd
from pandas.testing import assert_frame_equal
log.basicConfig(level=log.INFO, format='%(message)s')
class TestFplPandas(unittest.TestCase):
def test_get_teams(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_get_team(team_ids, return_json):
self.assertEqual(team_ids, None)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_teams = mock_get_team
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_teams()
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_teams_with_ids(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_get_team(team_ids, return_json):
self.assertEqual(team_ids, [1, 2])
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_teams = mock_get_team
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_teams([1, 2])
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_game_weeks(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_game_weeks(game_week_ids, return_json):
self.assertEqual(game_week_ids, None)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_gameweeks = mock_game_weeks
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_game_weeks()
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_game_weeks_with_ids(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_get_game_weeks(game_week_ids, return_json):
self.assertEqual(game_week_ids, [1, 2])
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_gameweeks = mock_get_game_weeks
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_game_weeks([1, 2])
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_fixtures(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
fpl_mock = mock.MagicMock()
async def mock_get_fixtures(return_json):
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_fixtures = mock_get_fixtures
fpl = FPLPandas(fpl=fpl_mock)
actual_df = fpl.get_fixtures()
expected_df = pd.DataFrame.from_dict(test_data).set_index('id')
self.assertTrue(expected_df.equals(actual_df))
def test_get_player(self):
test_data = {'id': 1, 'attr1': 'value11', 'attr2': 'value12',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
}
expected_history_past = [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_history = [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_fixtures = [{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_player_df = pd.DataFrame.from_records([test_data], index=['id']).rename(index={'id': 'player_id'})
expected_history_past_df = pd.DataFrame.from_dict(expected_history_past).set_index(['player_id', 'season_name'])
expected_history_df = pd.DataFrame.from_dict(expected_history).set_index(['player_id', 'fixture'])
expected_fixtures_df = pd.DataFrame.from_dict(expected_fixtures).set_index(['player_id', 'event'])
fpl_mock = mock.MagicMock()
async def mock_get_player(player_id, players, include_summary, return_json):
self.assertEqual(player_id, 1)
self.assertEqual(players, None)
self.assertEqual(include_summary, True)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_player = mock_get_player
fpl = FPLPandas(fpl=fpl_mock)
actual_player_df, actual_history_past_df, actual_history_df, actual_fixture_df = fpl.get_player(1)
assert_frame_equal(expected_player_df, actual_player_df)
assert_frame_equal(expected_history_past_df, actual_history_past_df)
assert_frame_equal(expected_history_df, actual_history_df)
assert_frame_equal(expected_fixtures_df, actual_fixture_df)
def test_get_player_with_no_history(self):
test_data = {'id': 1, 'attr1': 'value11', 'attr2': 'value12',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
}
expected_history_past = [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_fixtures = [{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1}]
expected_player_df = pd.DataFrame.from_records([test_data], index=['id']).rename(index={'id': 'player_id'})
expected_history_past_df = pd.DataFrame.from_dict(expected_history_past).set_index(['player_id', 'season_name'])
expected_history_df = pd.DataFrame(columns=['player_id', 'fixture']).set_index(['player_id', 'fixture'])
expected_fixtures_df = pd.DataFrame.from_dict(expected_fixtures).set_index(['player_id', 'event'])
fpl_mock = mock.MagicMock()
async def mock_get_player(player_id, players, include_summary, return_json):
self.assertEqual(player_id, 1)
self.assertEqual(players, None)
self.assertEqual(include_summary, True)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_player = mock_get_player
fpl = FPLPandas(fpl=fpl_mock)
actual_player_df, actual_history_past_df, actual_history_df, actual_fixture_df = fpl.get_player(1)
assert_frame_equal(expected_player_df, actual_player_df)
assert_frame_equal(expected_history_past_df, actual_history_past_df)
assert_frame_equal(expected_history_df, actual_history_df, check_index_type=False)
assert_frame_equal(expected_fixtures_df, actual_fixture_df)
def test_get_players_all(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
}]
expected_players= [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
expected_history_past = [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_history = [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_fixtures= [{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_players_df = pd.DataFrame.from_dict(expected_players).set_index('id').rename(index={'id': 'player_id'})
expected_history_past_df = pd.DataFrame.from_dict(expected_history_past).set_index(['player_id', 'season_name'])
expected_history_df = pd.DataFrame.from_dict(expected_history).set_index(['player_id', 'fixture'])
expected_fixtures_df = pd.DataFrame.from_dict(expected_fixtures).set_index(['player_id', 'event'])
fpl_mock = mock.MagicMock()
async def mock_get_players(player_ids, include_summary, return_json):
self.assertIsNone(player_ids)
self.assertEqual(include_summary, True)
self.assertEqual(return_json, True)
return test_data
fpl_mock.get_players = mock_get_players
fpl = FPLPandas(fpl=fpl_mock)
actual_players_df, actual_history_past_df, actual_history_df, actual_fixture_df = fpl.get_players()
assert_frame_equal(expected_players_df, actual_players_df)
assert_frame_equal(expected_history_past_df, actual_history_past_df)
assert_frame_equal(expected_history_df, actual_history_df)
assert_frame_equal(expected_fixtures_df, actual_fixture_df)
def test_get_players_with_ids(self):
test_data = [{'id': 1, 'attr1': 'value11', 'attr2': 'value12',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22',
'history_past': [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12'},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22'}],
'history': [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22'}],
'fixtures': [{'event': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22'}]
}]
expected_players= [{'id': 1, 'attr1': 'value11', 'attr2': 'value12'},
{'id': 2, 'attr1': 'value21', 'attr2': 'value22'}]
expected_history_past = [{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'season_name': '2017/18', 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'season_name': '2018/19', 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_history = [{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'fixture': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'fixture': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_fixtures= [{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 1},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 1},
{'event': 1, 'attr1': 'value11', 'attr2': 'value12', 'player_id': 2},
{'event': 2, 'attr1': 'value21', 'attr2': 'value22', 'player_id': 2}]
expected_players_df = | pd.DataFrame.from_dict(expected_players) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
import sklearn
import pickle
import csv
from datetime import datetime as dt
from datetime import timedelta
import inspect
import os
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import recall_score
from sklearn.model_selection import GridSearchCV
from IPython.display import display  # used by the display() calls in train_rfc
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
pd.options.mode.chained_assignment = None
# input filepaths
file = 'C:/Users/KC/Desktop/Project2/CGMData.csv'
file2 = 'C:/Users/KC/Desktop/Project2/InsulinData.csv'
file3 = 'C:/Users/KC/Desktop/Project2/CGM_patient2.csv'
file4 = 'C:/Users/KC/Desktop/Project2/Insulin_patient2.csv'
# output filepath for Result.csv
out_file = 'C:/Users/KC/Desktop/Project2/Result.csv'
meal_dfs = []
no_meal_dfs = []
################################################################################################################################
def train_rfc(feats_df):
X = feats_df.drop('Target', axis = 1)
Y = feats_df['Target']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.8, random_state = 0, stratify = Y)
X_cols = X.columns.tolist()
ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.transform(X_test)
Y_train = np.array(Y_train)
rfc = RandomForestClassifier()
n_estimators = [100, 400, 600, 700]
max_features = ['sqrt']
max_depth = [10, 15, 20]
min_samples_split = [2, 7, 18, 23]
min_samples_leaf = [2, 7, 13]
bootstrap = [False]
param_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
gs = GridSearchCV(rfc, param_grid, cv = 5, verbose = 1, n_jobs=-1)
gs.fit(X_train_scaled, Y_train)
rfc = gs.best_estimator_
gs.best_params_
gs_df = pd.DataFrame(gs.cv_results_).sort_values('rank_test_score').reset_index(drop=True)
Y_pred_gs = rfc.predict(X_test_scaled)
conf_matrix = pd.DataFrame(confusion_matrix(Y_test, Y_pred_gs), index = ['actual 0', 'actual 1'], columns = ['predicted 0', 'predicted 1'])
display(conf_matrix)
display('Random Forest Recall Score', recall_score(Y_test, Y_pred_gs))
# Save Model Using Pickle
filename = 'finalized_model.sav'
pickle.dump(rfc, open(filename, 'wb'))
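# A short companion sketch: reloading the classifier pickled just above. Note
# that scoring new data would also need the StandardScaler fitted inside
# train_rfc; only the RandomForestClassifier itself is persisted here.
def load_rfc(filename='finalized_model.sav'):
    with open(filename, 'rb') as f:
        return pickle.load(f)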
################################################################################################################################
def feat_extraction(meal_dfs, no_meal_dfs):
feats_dict = {}
set_switch = 0
r = 0
desc_stats = ['mean', '50%', 'max', 'min', 'std']
datasets = [no_meal_dfs, meal_dfs]
while set_switch < 2:
for set in datasets:
for y in range(len(set)):
X = set[y]
ISIG = X['ISIG Value']
X['ISIG_diff'] = X['ISIG Value'].diff()
X['ISIG_diff_2'] = X['ISIG_diff'].diff()
X['Glucose_diff'] = X['Sensor Glucose (mg/dL)'].diff()
X['Glucose_diff_2'] = X['Glucose_diff'].diff()
# hour of day for each sample
X['Hour'] = pd.to_datetime(X['Datetime']).dt.hour
up, down, steady = 0, 0, 0
for x in range(len(ISIG)-1):
ISIG_now = ISIG.iloc[x]
ISIG_next = ISIG.iloc[x+1]
if (ISIG_next > ISIG_now):
up += 1
elif (ISIG_next == ISIG_now):
steady += 1
elif (ISIG_next < ISIG_now):
down += 1
ud_ratio = up/(up+down+steady)
feats = X.describe()
feat_cols = feats.columns
for col in feat_cols:
feats_dict[col] = {}
for stat in desc_stats:
feat_name = col + ' ' + stat
feats_dict[col][feat_name] = feats.loc[stat][col]
cols_list = []
item_list = []
for k, v in feats_dict.items():
for k1, v1 in v.items():
item_list.append(v1)
cols_list.append(k1)
cols_list.extend(['Range_Glucose', 'Range_ISIG', 'ISIG U/D Ratio', 'Target'])
range_ISIG = X['ISIG Value'].max() - X['ISIG Value'].min()
range_Glucose = X['Sensor Glucose (mg/dL)'].max() - X['Sensor Glucose (mg/dL)'].min()
target = set_switch
new_row = | pd.DataFrame(item_list) | pandas.DataFrame |
from bs4 import BeautifulSoup
import datetime
import json
import lxml
import numpy as np
import os
import pandas as pd
import shutil
import random
import re
import requests
import time
from a0001_admin import clean_dataframe
from a0001_admin import name_paths
from a0001_admin import retrieve_datetime
from a0001_admin import retrieve_format
from a0001_admin import retrieve_list
from a0001_admin import retrieve_path
from a0001_admin import write_paths
from a0001_admin import work_completed
from a0001_admin import work_to_do
"""
Reference: https://python.plainenglish.io/scrape-google-scholar-with-python-fc6898419305
"""
def query_pubs(dataset):
"""
Maximum information
Minimal scrapes
Check for redundancy
"""
search_gscholar(dataset)
#json_to_dataframe()
#aggregate_json()
#search_articles()
#aggregate_df()
"""
# scrape json of specific publications by their title
work_completed('acquire_gscholar_missing_json_scraped', 0)
missing_json_scraped()
work_completed('acquire_gscholar_missing_json_scraped', 1)
# scrape json from gscholar
work_completed('gscholar_json_scraped', 0)
json_scraped()
work_completed('gscholar_json_scraped', 1)
# scrape html from gscholar and save
# parse json from scraped html
# convert json to df
work_completed('gscholar_json_to_dataframe', 0)
json_to_dataframe()
work_completed('gscholar_json_to_dataframe', 1)
work_completed('gscholar_aggregate_articles', 0)
aggregate_articles()
work_completed('gscholar_aggregate_articles', 1)
# scrape metadata for each article as html
# scrape metadata for each article as json
# add article metadata to df
"""
def check_scraped(name_dataset, term, year, num):
"""
"""
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_dataset)
src_path = retrieve_path(name_src)
paths_to_check = [src_path]
if name_dataset == 'gscholar':
src_path_json = os.path.join(src_path, 'json')
src_path = src_path_json
#paths_to_check.append(src_path_json)
#df_path = os.path.join(retrieve_path(str(name_dataset + '_article_df')))
#paths_to_check.append(df_path)
try:
df_path = os.path.join(retrieve_path(name_src), 'df')
df_file = os.path.join(df_path, term + '.csv')
df = pd.read_csv(df_file)
df = df[(df['year'] == year)]
num_int = int(num.lstrip())*10
#print('num_int = ' + str(num_int))
if len(list(df['year'])) < num_int:
#print('found: ' + 'gscholar' + ' ' + term + ' ' + str(year) + ' ' + str(num))
return(True)
except:
hello = 'hello'
for file in os.listdir(src_path):
#print('file = ' + file)
# check specific gscholar search
file_split = file.split('.')
if file_split[0] == term: return(True)
# find and compare file term to term passed into the function
pattern = '[a-z]+'
flags = re.IGNORECASE
file_term = re.findall(pattern, file, flags)
file_term = file_term[0]
if file_term != term: continue
#print('file_term = ' + file_term + ' term = ' + term)
# find and compare file year to year passed into the function
pattern = '[0-9]{4}'
file_year = re.findall(pattern, file)
file_year = file_year[0]
if str(file_year) != str(year): continue
#print('file_year = ' + file_year + ' year = ' + str(year))
# find and compare file year to year passed into the function
pattern = '[0-9]{3}'
file_num = re.findall(pattern, file)
file_num = file_num[1]
if str(file_num) != str(num): continue
#print('file_num = ' + file_num + ' num = ' + str(num))
# find and compare file saved date to current date
file = file.split(' ')
file = file[3]
pattern = '[0-9]{4}' + '-' + '[0-9]{2}' + '-' + '[0-9]{2}'
file_date_saved = re.findall(pattern, file)
file_date_saved = file_date_saved[0]
#print('file_date_saved = ' + file_date_saved)
a = file_date_saved.split('-')
a = datetime.datetime(int(a[0]), int(a[1]), int(a[2]), 0, 0)
#print('a = ' + str(a))
b = datetime.datetime.today()
#print('b = ' + str(b))
v = b-a
#print('v = ' + str(v))
v = int(v.days)
#print('v = ' + str(v))
if v < 3:
#print('date match: ' + str(v))
#print('scraped within the last 3 days; skipping this query.')
return(True)
return(False)
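# For reference, a sketch of the JSON filename convention check_scraped parses
# above. search_gscholar (below) writes files as
# "<term> <year> <num> <datetime>.json"; the term shown is hypothetical and the
# exact retrieve_datetime() format is an assumption, but the parser only needs
# the fourth space-separated token to start with a YYYY-MM-DD date.
_example_scraped_name = 'microneedle 2021 003 2021-08-15 10-30-00.json'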
def html_to_json(soup):
"""
"""
# Scrape just PDF links
for pdf_link in soup.select('.gs_or_ggsm a'):
pdf_file_link = pdf_link['href']
print(pdf_file_link)
# JSON data will be collected here
data = []
# Container where all needed data is located
for result in soup.select('.gs_ri'):
title = result.select_one('.gs_rt').text
try:
title_link = result.select_one('.gs_rt a')['href']
except:
title_link = ''
publication_info = result.select_one('.gs_a').text
snippet = result.select_one('.gs_rs').text
cited_by = result.select_one('#gs_res_ccl_mid .gs_nph+ a')['href']
related_articles = result.select_one('a:nth-child(4)')['href']
# get the year of publication of each paper
try:
txt_year = result.find("div", class_="gs_a").text
ref_year = re.findall('[0-9]{4}', txt_year)
ref_year = ref_year[0]
except:
ref_year = 0
# get number of citations for each paper
try:
txt_cite = result.find("div", class_="gs_fl").find_all("a")[2].string
citations = txt_cite.split(' ')
citations = (citations[-1])
citations = int(citations)
except:
citations = 0
try:
all_article_versions = result.select_one('a~ a+ .gs_nph')['href']
except:
all_article_versions = None
data.append({
'year': ref_year,
'title': title,
'title_link': title_link,
'publication_info': publication_info,
'snippet': snippet,
'citations': citations,
'cited_by': f'https://scholar.google.com{cited_by}',
'related_articles': f'https://scholar.google.com{related_articles}',
'all_article_versions': f'https://scholar.google.com{all_article_versions}',
})
return(data)
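# A hedged sketch of driving html_to_json directly. search_gscholar (below)
# obtains its soup via retrieve_html, which is assumed to wrap a request
# roughly like this; the parser choice follows the lxml import at the top of
# this file, and the User-agent string is illustrative.
def _example_parse_results_page(url):
    html = requests.get(url, headers={'User-agent': 'Mozilla/5.0'}).text
    soup = BeautifulSoup(html, 'lxml')
    return html_to_json(soup)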
def json_to_dataframe():
"""
"""
name_dataset = 'pubs'
# retrieve archival json
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_dataset)
src_path = retrieve_path(name_src)
#src_path = os.path.join(src_path, 'json')
df_all = pd.DataFrame()
for term in retrieve_list('search_terms'):
df_term = pd.DataFrame()
for file in os.listdir(src_path):
if not file.endswith('.json'): continue
json_src = os.path.join(src_path, file)
df = pd.read_json(json_src)
df_path = os.path.join(retrieve_path(name_src), 'df')
if not os.path.exists(df_path):
os.makedirs(df_path)
df_file = os.path.join(df_path, name_dataset + '.csv')
df_all = df_all.append(df)
df_all = df_all.drop_duplicates(subset = 'title_link')
df_all = clean_dataframe(df_all)
df_all.to_csv(df_file)
if term not in str(file): continue
df_dst = os.path.join(retrieve_path(name_src), 'df')
df_file = os.path.join(df_dst, term + '.csv')
df_term = df_term.append(df)
df_term = df_term.drop_duplicates(subset = 'title_link')
df_term = clean_dataframe(df_term)
df_term.to_csv(df_file)
def search_gscholar(dataset):
"""
Retrieve json year by year
"""
for term in retrieve_list('search_terms'):
print('searching gscholar for term = ' + term)
#json_to_dataframe()
currentDateTime = datetime.datetime.now()
date = currentDateTime.date()
if '-/-/-' in term:
term_split = term.split('-/-/-')
year_range = [term_split[0]]
num_range = [0]
term = term_split[1]
else:
search_year_min = int(retrieve_format('search_year_min'))-1
print('search_year_min = ' + str(search_year_min))
year_range = range(int(date.strftime("%Y")), search_year_min, -1)
num_range = np.arange(0, 100, 1, dtype=int)
for year in year_range:
#work_completed('begin_acquire_gscholar_json_' + str(year), 0)
for num in num_range:
print('term = ' + str(term))
print('year = ' + str(year))
print('start num = ' + str(num*10))
num_str = str(num).zfill(3)
url = 'https://scholar.google.com/scholar?'
url = url + 'start=' + str(int(num*10))
url = url + '&q=' + term
#url = url + '&hl=en&as_sdt=0,5'
url = url + '&hl=en&as_sdt=0,5'
url = url + '&as_ylo=' + str(year)
url = url + '&as_yhi=' + str(year)
# check if recently scraped
if check_scraped(dataset, term, year, num_str):
print('found: ' + dataset + ' ' + term + ' ' + str(year) + ' ' + num_str)
continue
soup = retrieve_html(url)
print('soup = ')
print(soup)
if error_check(soup) == True: return('error')
data = html_to_json(soup)
print('data = ')
print(data)
if data == []: break
#if len(data) < 10 and year != int(date.strftime("%Y")):
#work_completed('begin_acquire_gscholar_json_' + str(year), 1)
data_json = json.dumps(data, indent = 2, ensure_ascii = False)
print(data_json)
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(dataset)
json_file = os.path.join(retrieve_path(name_src), term + ' ' + str(year) + ' ' + str(num_str) + ' ' + str(retrieve_datetime()) + '.json' )
json_file = open(json_file, 'w')
json_file.write(data_json)
json_file.close()
json_to_dataframe()
def aggregate_json():
"""
"""
def search_articles():
"""
"""
headers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
proxies = {
'http': os.getenv('HTTP_PROXY') # or just type proxy here without os.getenv()
}
name_dataset = 'gscholar'
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_dataset)
df_path = os.path.join(retrieve_path(name_src), 'df')
df_file = os.path.join(df_path, name_dataset + '.csv')
df = pd.read_csv(df_file)
df = clean_dataframe(df)
df_original = df
for url in list(df_original['title_link']):
try:
char_remove = ['/', '.', ':', 'httpswww']
url_name = url
for char in char_remove:
url_name = url_name.replace(char, '')
url_name = url_name[:40]
except:
url_name = 'none_found'
#if check_scraped(name_dataset, url_name, 0, 0): continue
#df = pd.DataFrame()
df = df_original[(df_original['title_link'] == url)]
print('df = ')
print(df)
df['time_retrieved'] = [retrieve_datetime()]
df['url'] = [url]
print(url)
#time_string = retrieve_datetime()
#wait_time = random.random()*5 + 2
#print('Wait: ' + str(round(wait_time,2)) + ' from ' + str(time_string))
#time.sleep(wait_time)
try:
#html = requests.get(url, headers=headers, proxies=proxies).text
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
except:
soup = ''
try:
content = soup.head.title.text
df['head_title'] = [content]
except:
df['head_title'] = [None]
for tag in retrieve_list('html_meta_tags'):
try:
content = soup.find('meta', {'name':tag}).get('content')
print(tag + ' = ')
print(content)
df[str(tag)] = [content]
except:
df[str(tag)] = [None]
try:
#content = soup.find_all('meta', {'name':tag})
res = []
for i in soup.find_all('meta', {'name':tag}):
res.append(i['content'])
#print(tag + ' = ')
#print(content)
df[str(tag) + '-all'] = [res]
except:
df[str(tag)] = [None]
#df = df.T
print('df = ')
print(df)
df_path = os.path.join(retrieve_path(str(name_dataset + '_article_df')))
df_dst = os.path.join(df_path, url_name + '.csv')
df.to_csv(df_dst)
#print('df_dst = ' + str(df_dst))
def scrape_gscholar_article():
"""
"""
headers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
proxies = {
'http': os.getenv('HTTP_PROXY') # or just type proxy here without os.getenv()
}
name_dataset = 'gscholar'
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_dataset)
df_path = os.path.join(retrieve_path(name_src), 'df')
df_file = os.path.join(df_path, name_dataset + '.csv')
df = pd.read_csv(df_file)
df = clean_dataframe(df)
df_original = df
for url in list(df_original['title_link']):
try:
char_remove = ['/', '.', ':', 'httpswww']
url_name = url
for char in char_remove:
url_name = url_name.replace(char, '')
url_name = url_name[:40]
except:
url_name = 'none_found'
if check_scraped(name_dataset, url_name, 0, 0): continue
#df = pd.DataFrame()
df = df_original[(df_original['title_link'] == url)]
print('df = ')
print(df)
df['time_retrieved'] = [retrieve_datetime()]
df['url'] = [url]
print(url)
#time_string = retrieve_datetime()
#wait_time = random.random()*5 + 2
#print('Wait: ' + str(round(wait_time,2)) + ' from ' + str(time_string))
#time.sleep(wait_time)
try:
#html = requests.get(url, headers=headers, proxies=proxies).text
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')
except:
soup = ''
try:
content = soup.head.title.text
df['head_title'] = [content]
except:
df['head_title'] = [None]
for tag in retrieve_list('html_meta_tags'):
try:
content = soup.find('meta', {'name':tag}).get('content')
print(tag + ' = ')
print(content)
df[str(tag)] = [content]
except:
df[str(tag)] = [None]
try:
#content = soup.find_all('meta', {'name':tag})
res = []
for i in soup.find_all('meta', {'name':tag}):
res.append(i['content'])
#print(tag + ' = ')
#print(content)
df[str(tag) + '-all'] = [res]
except:
df[str(tag)] = [None]
df = df.T
print('df = ')
print(df)
df_path = os.path.join(retrieve_path(str(name_dataset + '_article_df')))
df_dst = os.path.join(df_path, url_name + '.csv')
df.to_csv(df_dst)
#print('df_dst = ' + str(df_dst))
aggregate_articles()
def aggregate_articles():
"""
"""
name_dataset = 'gscholar'
name_src, name_dst, name_summary, name_unique, plot_unique = name_paths(name_dataset)
df_all = | pd.DataFrame() | pandas.DataFrame |