import numpy as np
from pandas.tseries.holiday import USFederalHolidayCalendar
import datetime
import pandas as pd
def mape(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
def rmse(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.sqrt(np.mean((y_true - y_pred) ** 2))
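# Illustrative check (not from the original script):
# mape([100, 200], [110, 190]) -> 7.5 and rmse([100, 200], [110, 190]) -> 10.0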
def DR_Temp_data_cleaning(dataframe):
    '''
    Clean the raw load/temperature dataframe: keep the relevant columns, add
    log-transformed and 48-hour lagged features, and return the resulting frame.
    '''
dataframe['Date'] = pd.to_datetime(dataframe['Date'])
    # Work on an explicit copy so the .loc assignments below do not write into
    # a chained-assignment view of the original dataframe.
    test = dataframe[
        ['Date', 'Hour', 'Weekday', 'Month', 'Load', 'Mean_Temp', 'Mean_Humi', 'RIV_Temp', 'RIV_Humi', 'LAX_Temp',
         'LAX_Humi', 'USC_Temp', 'USC_Humi', 'WJF_Temp', 'WJF_Humi', 'TRM_Temp', 'TRM_Humi']].copy()
test.loc[:, 'RIV_Temp_Log'] = np.log(dataframe['RIV_Temp'])
test.loc[:, 'LAX_Temp_Log'] = np.log(dataframe['LAX_Temp'])
test.loc[:, 'USC_Temp_Log'] = np.log(dataframe['USC_Temp'])
test.loc[:, 'WJF_Temp_Log'] = np.log(dataframe['WJF_Temp'])
test.loc[:, 'TRM_Temp_Log'] = np.log(dataframe['TRM_Temp'])
test.loc[:, 'Load_Log'] = np.log(dataframe['Load'])
test['Load_Lag_48'] = test['Load_Log'].shift(48, axis=0)
test['Humi_Lag_48'] = test['Mean_Humi'].shift(48, axis=0)
test['RIV_Temp_Log_Lag_48'] = test['RIV_Temp_Log'].shift(48, axis=0)
test['LAX_Temp_Log_Lag_48'] = test['LAX_Temp_Log'].shift(48, axis=0)
test['USC_Temp_Log_Lag_48'] = test['USC_Temp_Log'].shift(48, axis=0)
test['WJF_Temp_Log_Lag_48'] = test['WJF_Temp_Log'].shift(48, axis=0)
test['TRM_Temp_Log_Lag_48'] = test['TRM_Temp_Log'].shift(48, axis=0)
cal = USFederalHolidayCalendar()
holidays = cal.holidays(start='2014-01-01', end=str(datetime.datetime.now()), return_name=True)
    holidays = pd.DataFrame(holidays)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%capture
# Compile and import local pyrossgeo module
import os, sys
owd = os.getcwd()
os.chdir('../../')
sys.path.insert(0,'../../')
# !python setup.py build_ext --inplace
os.chdir(owd)
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pyrossgeo
import pandas as pd
import json
# -
# # Generate the configuration files
# ### Define model
# +
model = {
"settings" : {
"classes" : ["S", "E", "A", "I", "R"],
"stochastic_threshold_from_below" : [1000, 1000, 1000, 1000, 1000],
"stochastic_threshold_from_above" : [500, 500, 500, 500, 500],
"infection_scaling" : "powerlaw",
"infection_scaling_parameters" : [0, 0.004, 0.5] # a + b * rho^c
},
"S" : {
"linear" : [],
"infection" : [ ["I", "-betaI"], ["A", "-betaA"] ]
},
"E" : {
"linear" : [ ["E", "-gammaE"] ],
"infection" : [ ["I", "betaI"], ["A", "betaA"] ]
},
"A" : {
"linear" : [ ["E", "gammaE"], ["A", "-gammaA"] ],
"infection" : []
},
"I" : {
"linear" : [ ["A", "gammaA"], ["I", "-gammaI"] ],
"infection" : []
},
"R" : {
"linear" : [ ["I", "gammaI"] ],
"infection" : []
}
}
model_classes = model['settings']['classes']
model_dim = len(model_classes)
# -
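# A quick illustration (not from the original notebook) of the power-law
# infection scaling declared above: the factor applied to the infection terms
# is a + b * rho**c, where rho is assumed here to be the local population density.
# +
a_inf, b_inf, c_inf = model['settings']['infection_scaling_parameters']

def infection_scaling(rho):
    # With parameters [0, 0.004, 0.5] this grows like 0.004 * sqrt(rho).
    return a_inf + b_inf * rho**c_inf
# -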
# ### Configuration generation parameters
#
# Here we define some parameters with which all the configuration files will be generated. Edit these if you want to change the simulation.
# +
sim_config_path = 'london_simulation'
min_num_moving = 20 # Remove all commuting edges where less than `min_num_moving` are moving
# Decide which classes are allowed to commute
allow_class = [
('S', True),
('E', True),
('A', True),
('Ia1', True),
('Ia2', True),
('Ia3', True),
('Is1', True),
('Is2', False),
('Is3', False),
('R', True),
]
# Decide where to seed with infecteds
seed_pop = [
(0, 1, 'E', 100), # Home, age group, model class, seed quantity
(10, 2, 'E', 100),
(23, 0, 'E', 100),
(622, 4, 'E', 100),
(232, 4, 'E', 100)
]
# Node parameters
n_betaI = 0.02
n_betaA = 0.02
n_gammaE = 1/3.0
n_gammaA = 1/3.0
n_gammaI = 1/3.0
# Cnode parameters
cn_betaI = n_betaI
cn_betaA = n_betaA
cn_gammaE = n_gammaE
cn_gammaA = n_gammaA
cn_gammaI = n_gammaI
# Time steps
t_start = 0
t_end = 24*60*100
_, dts = pyrossgeo.utils.get_dt_schedule([
(0, 1*60),
(7*60, 2),
(10*60, 2*60),
(17*60, 2),
(19*60, 2*60)
], end_time=24*60)
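# Note (added): each (start_minute, dt) pair above appears to set the step size
# used from that time of day onward -- coarse 1-2 hour steps outside rush hour
# and fine 2-minute steps during the 7-10h and 17-19h commuting windows.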
# -
# ### Format the commuting network
# +
cn = pd.read_csv("%s/commuter_networks.csv" % sim_config_path)
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from utils.utils import CONSTANTS
from utils.utils import publication_plot_pred_act, publication_plot_residuals
from use_crabnet import predict_crabnet
from use_densenet import predict_densenet
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
# %%
plt.rcParams.update({'font.size': 16})
cons = CONSTANTS()
mat_props_units = cons.mp_units_dict
mat_props = cons.mps
mat_props_names = cons.mp_names
pretty_mp_names = cons.mp_names_dict
# %%
def plot_compare_lcs(times,
maes,
mat_prop,
classic_results=None,
ax=None):
mp_sym_dict = cons.mp_sym_dict
mp_units_dict = cons.mp_units_dict
fig = None
if classic_results is not None:
classic_time = classic_results[0]
classic_mae = classic_results[1]
crab_time, dense_time = times
crab_mae, dense_mae = maes
x_crab = np.arange(len(crab_mae))
x_dense = np.arange(len(dense_mae))
x_crab = np.linspace(0, crab_time, len(crab_mae))
x_dense = np.linspace(0, dense_time, len(dense_mae))
# Plot training curve
if ax is None:
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(x_crab, crab_mae,
'-', color=cons.crab_red, marker='o', ms=0, alpha=1,
label='CrabNet')
ax.plot(x_dense, dense_mae,
'-', color=cons.dense_blue, marker='s', ms=0, alpha=1,
label='DenseNet')
ax.axhline(np.min(dense_mae), color=cons.dense_blue, linestyle='--',
alpha=1)
ax.set_xlabel('Training time [s]')
ax.plot([crab_time, dense_time], [crab_mae.iloc[-5:].mean(),
dense_mae.iloc[-5:].mean()],
'kX', ms=14, mfc='gold', label='1000 epochs')
ymax = 1.5*np.mean(dense_mae)
if classic_results is not None:
classic_x = classic_time
classic_y = 1.5*np.mean(dense_mae)
if classic_time > 1.2 * np.max(crab_time):
classic_x = np.max(crab_time)
ax.plot([classic_x*(14/20), classic_x], [classic_mae, classic_mae],
'g-', linewidth=5)
ax.plot(classic_x, classic_mae, '>', mec='green', ms=12,
mfc='white', mew=3, label='Best classic')
ax.text(classic_x, classic_mae, f'({classic_time:0.0f} s) \n',
horizontalalignment='right', verticalalignment='center')
elif classic_mae > ymax:
classic_mae = ymax * 0.97
ax.plot([classic_x, classic_x], [classic_mae*(16.5/20), classic_mae],
'g-', linewidth=5)
ax.plot(classic_x, classic_mae, '^', mec='green', ms=12,
mfc='white', mew=3, label='Best classic')
txt = f'\n\n({classic_mae:0.2f} {mp_units_dict[mat_prop]}) '
ax.text(classic_x, classic_mae*(16.5/20), txt,
horizontalalignment='center', verticalalignment='center')
else:
ax.plot(classic_x, classic_mae, 'o', mec='green', ms=12,
mfc='white', mew=4, label='Best classic')
ax.set_ylabel(f'MAE of {mp_sym_dict[mat_prop]} '
f'[{mp_units_dict[mat_prop]}]')
ax.set_ylim(np.min(crab_mae)/1.5, ymax)
ax.tick_params(left=True, top=True, right=True, direction='in', length=7)
ax.tick_params(which='minor', left=True, top=True, right=True,
direction='in', length=4)
minor_locator_x = AutoMinorLocator(2)
minor_locator_y = AutoMinorLocator(2)
ax.xaxis.set_minor_locator(minor_locator_x)
ax.yaxis.set_minor_locator(minor_locator_y)
# Get all plot labels for legend and label legend
lines, labels = ax.get_legend_handles_labels()
ax.legend(lines,
labels,
loc='best',
prop={'size': 12})
if fig is not None:
return fig
def multi_plots_lcs(nn_dir, classics_dir):
files = os.listdir(classics_dir)
classics_results_csv = classics_dir + [file for file in files
if 'test_scores.csv' in file][0]
df_classics = pd.read_csv(classics_results_csv)
files = os.listdir(nn_dir)
# print(files)
nn_results_csv = nn_dir + [file for file in files
if 'all_results' in file
if '.csv' in file][0]
df_nn = pd.read_csv(nn_results_csv)
mat_props = df_nn['mat_prop'].unique()
seeds = df_nn['rng_seed'].unique()
seed_values = {seed: 0 for seed in seeds}
df_crabnet = df_nn[df_nn['model_type'] == 'CrabNet']
for mp in mat_props:
df_mp = df_crabnet
mp_bools = df_mp['mat_prop'] == mp
best_mae = np.min(df_mp[mp_bools]['mae_val'])
pc_mae = (df_mp[mp_bools]['mae_val'] - best_mae) / best_mae
imp_col = pd.Series(pc_mae, name='improvement')
df_mp = pd.concat([df_mp, imp_col], axis=1)
df_mp = df_mp[df_mp['mat_prop'] == mp].sort_values(by='improvement')
df_mp_seeds = df_mp['rng_seed']
for i, seed in enumerate(df_mp_seeds):
seed_values[seed] += (df_mp.iloc[i]['improvement'])
ranked_seeds = pd.Series(seed_values).sort_values()
seed = ranked_seeds.index[0]
df_nn = df_nn[df_nn['rng_seed'] == seed]
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
mats = ['energy_atom', 'Egap', 'agl_thermal_conductivity_300K',
'ael_debye_temperature']
for mp, ax in zip(mats, axes.ravel()):
run_ids = df_nn[df_nn['mat_prop'] == mp]
crab_id = run_ids[run_ids['model_type'] == 'CrabNet']['id'].values[0]
dense_id = run_ids[run_ids['model_type'] == 'DenseNet']['id'].values[0]
crab_df = pd.read_csv(f'{nn_dir}/{crab_id}/progress.csv')
dense_df = pd.read_csv(f'{nn_dir}/{dense_id}/progress.csv')
crab_maes = crab_df['mae_val']
dense_maes = dense_df['mae_val']
crab_bools = run_ids['model_type'] == 'CrabNet'
dense_bools = run_ids['model_type'] == 'DenseNet'
crab_time = run_ids[crab_bools]['fit_time'].values[0]
dense_time = run_ids[dense_bools]['fit_time'].values[0]
df_classic = df_classics[df_classics['mat_prop'] == mp]
classic_mae = df_classic['mae_test'].values[0]
classic_time = df_classic['fit_time'].values[0]
plot_compare_lcs((crab_time, dense_time),
(crab_maes, dense_maes),
mp,
(classic_time, classic_mae),
ax=ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/learning_curves/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_learning_curve.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def plot_dense_crab_preds(mp, ax):
test_file = f'test_files/{mp}_test.csv'
fig = None
if ax is None:
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
y_act_dense, y_pred_dense = predict_densenet(mp, test_file)
fig_dense = publication_plot_pred_act(y_act_dense,
y_pred_dense,
mat_prop=mp,
model='DenseNet',
ax=ax[0])
y_act_crab, y_pred_crab = predict_crabnet(mp, test_file)
fig_crab = publication_plot_pred_act(y_act_crab,
y_pred_crab,
mat_prop=mp,
model='CrabNet',
ax=ax[1])
if fig is not None:
return fig
def multi_plots_preds():
mat_props = ['energy_atom', 'Egap']
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
for i, mp in enumerate(mat_props):
ax = axes[i, :]
ax = plot_dense_crab_preds(mp, ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/pred_vs_act/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_pred_vs_act.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def plot_dense_crab_residuals(mp, ax):
test_file = f'test_files/{mp}_test.csv'
fig = None
if ax is None:
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
y_act_dense, y_pred_dense = predict_densenet(mp, test_file)
fig_dense = publication_plot_residuals(y_act_dense,
y_pred_dense,
mat_prop=mp,
model='DenseNet',
ax=ax[0])
y_act_crab, y_pred_crab = predict_crabnet(mp, test_file)
fig_crab = publication_plot_residuals(y_act_crab,
y_pred_crab,
mat_prop=mp,
model='CrabNet',
ax=ax[1])
y0_min, y0_max = ax[0].get_ylim()
y1_min, y1_max = ax[1].get_ylim()
y_min_min = np.min([y0_min, y1_min])
y_max_max = np.max([y0_max, y1_max])
ax[0].set_ylim(y_min_min, y_max_max)
ax[1].set_ylim(y_min_min, y_max_max)
if fig is not None:
return fig
def multi_plots_residuals():
mat_props = ['energy_atom', 'Egap']
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
for i, mp in enumerate(mat_props):
ax = axes[i, :]
ax = plot_dense_crab_residuals(mp, ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/residuals/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_residuals.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def get_figures(nn_dir, classics_dir):
files = os.listdir(classics_dir)
classics_results_csv = classics_dir + [file for file in files if
'test_scores.csv' in file][0]
    df_classics = pd.read_csv(classics_results_csv)
#!/usr/bin/env python
# coding: utf-8
# In[62]:
import pandas as pd
import plotly.graph_objects as go
import france_data_management as data
import datetime as dt
from datetime import timedelta
from dateutil.relativedelta import relativedelta
PATH = "../../"
# In[63]:
df_vacsi = data.import_data_vacsi_fra()
# In[ ]:
# In[64]:
def nbWithSpaces(nb):
str_nb = str(int(round(nb)))
if(nb>100000):
return str_nb[:3] + " " + str_nb[3:]
elif(nb>10000):
return str_nb[:2] + " " + str_nb[2:]
elif(nb>1000):
return str_nb[:1] + " " + str_nb[1:]
else:
return str_nb
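# Note (illustrative, not from the original script): Python's format spec gives
# full thousands grouping in one line, e.g. f"{int(round(nb)):,}".replace(",", " ")
# -> "1 234 567", whereas nbWithSpaces inserts a single space after the leading group.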
# In[185]:
fig = go.Figure()
DATE_DEBUT = "2021-09-01"
date_5_mois = (pd.to_datetime(DATE_DEBUT) - relativedelta(months=5) - timedelta(days=2)).strftime(format="%Y-%m-%d")
date_7_mois = (pd.to_datetime(DATE_DEBUT) - relativedelta(months=7) - timedelta(days=2)).strftime(format="%Y-%m-%d")
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/3/21 17:40
Desc: 天天基金网 (Eastmoney funds) - Fund profile - Portfolio
http://fundf10.eastmoney.com/ccmx_000001.html
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.utils import demjson
def fund_portfolio_hold_em(symbol: str = "162411", date: str = "2020") -> pd.DataFrame:
"""
天天基金网-基金档案-投资组合-基金持仓
http://fundf10.eastmoney.com/ccmx_000001.html
:param symbol: 基金代码
:type symbol: str
:param date: 查询年份
:type date: str
:return: 基金持仓
:rtype: pandas.DataFrame
"""
url = "http://fundf10.eastmoney.com/FundArchivesDatas.aspx"
params = {
"type": "jjcc",
"code": symbol,
"topline": "200",
"year": date,
"month": "",
"rt": "0.913877030254846",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
soup = BeautifulSoup(data_json["content"], "lxml")
item_label = [
item.text.split("\xa0\xa0")[1]
for item in soup.find_all("h4", attrs={"class": "t"})
]
big_df = pd.DataFrame()
for item in range(len(item_label)):
temp_df = pd.read_html(data_json["content"], converters={"股票代码": str})[item]
del temp_df["相关资讯"]
temp_df["占净值比例"] = temp_df["占净值比例"].str.split("%", expand=True).iloc[:, 0]
temp_df.rename(columns={"持股数(万股)": "持股数", "持仓市值(万元)": "持仓市值"}, inplace=True)
temp_df.rename(columns={"持股数(万股)": "持股数", "持仓市值(万元人民币)": "持仓市值"}, inplace=True)
temp_df["季度"] = item_label[item]
temp_df = temp_df[
[
"序号",
"股票代码",
"股票名称",
"占净值比例",
"持股数",
"持仓市值",
"季度",
]
]
big_df = big_df.append(temp_df, ignore_index=True)
big_df["占净值比例"] = pd.to_numeric(big_df["占净值比例"], errors="coerce")
big_df["持股数"] = pd.to_numeric(big_df["持股数"], errors="coerce")
big_df["持仓市值"] = pd.to_numeric(big_df["持仓市值"], errors="coerce")
big_df["序号"] = range(1, len(big_df) + 1)
return big_df
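# Illustrative usage sketch (not part of the original module); the fetchers
# below follow the same call pattern:
# fund_portfolio_hold_em_df = fund_portfolio_hold_em(symbol="162411", date="2020")
# print(fund_portfolio_hold_em_df)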
def fund_portfolio_bond_hold_em(symbol: str = "000001", date: str = "2021") -> pd.DataFrame:
"""
天天基金网-基金档案-投资组合-债券持仓
http://fundf10.eastmoney.com/ccmx1_000001.html
:param symbol: 基金代码
:type symbol: str
:param date: 查询年份
:type date: str
:return: 债券持仓
:rtype: pandas.DataFrame
"""
url = "http://fundf10.eastmoney.com/FundArchivesDatas.aspx"
params = {
"type": "zqcc",
"code": symbol,
"year": date,
"rt": "0.913877030254846",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
soup = BeautifulSoup(data_json["content"], "lxml")
item_label = [
item.text.split("\xa0\xa0")[1]
for item in soup.find_all("h4", attrs={"class": "t"})
]
big_df = pd.DataFrame()
for item in range(len(item_label)):
temp_df = pd.read_html(data_json["content"], converters={"债券代码": str})[item]
temp_df["占净值比例"] = temp_df["占净值比例"].str.split("%", expand=True).iloc[:, 0]
temp_df.rename(columns={"持仓市值(万元)": "持仓市值"}, inplace=True)
temp_df["季度"] = item_label[item]
temp_df = temp_df[
[
"序号",
"债券代码",
"债券名称",
"占净值比例",
"持仓市值",
"季度",
]
]
big_df = big_df.append(temp_df, ignore_index=True)
big_df["占净值比例"] = pd.to_numeric(big_df["占净值比例"], errors="coerce")
big_df["持仓市值"] = pd.to_numeric(big_df["持仓市值"], errors="coerce")
big_df["序号"] = range(1, len(big_df) + 1)
return big_df
def fund_portfolio_industry_allocation_em(symbol: str = "000001", date: str = "2021") -> pd.DataFrame:
"""
天天基金网-基金档案-投资组合-行业配置
http://fundf10.eastmoney.com/hytz_000001.html
:param symbol: 基金代码
:type symbol: str
:param date: 查询年份
:type date: str
:return: 行业配置
:rtype: pandas.DataFrame
"""
url = "http://api.fund.eastmoney.com/f10/HYPZ/"
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'api.fund.eastmoney.com',
'Pragma': 'no-cache',
'Referer': 'http://fundf10.eastmoney.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36',
}
params = {
'fundCode': symbol,
'year': date,
'callback': 'jQuery183006997159478989867_1648016188499',
'_': '1648016377955',
}
r = requests.get(url, params=params, headers=headers)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
temp_list = []
for item in data_json["Data"]["QuarterInfos"]:
temp_list.extend(item["HYPZInfo"])
temp_df = pd.DataFrame(temp_list)
temp_df.reset_index(inplace=True)
temp_df['index'] = temp_df.index + 1
temp_df.columns = [
"序号",
"-",
"截止时间",
"-",
"行业类别",
"市值",
"-",
"占净值比例",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
]
temp_df = temp_df[
[
"序号",
"行业类别",
"占净值比例",
"市值",
"截止时间",
]
]
temp_df["市值"] = pd.to_numeric(temp_df["市值"])
temp_df["占净值比例"] = pd.to_numeric(temp_df["占净值比例"], errors="coerce")
return temp_df
def fund_portfolio_change_em(
symbol: str = "003567", indicator: str = "累计买入", date: str = "2020"
) -> pd.DataFrame:
"""
天天基金网-基金档案-投资组合-重大变动
http://fundf10.eastmoney.com/ccbd_000001.html
:param symbol: 基金代码
:type symbol: str
:param indicator: choice of {"累计买入", "累计卖出"}
:type indicator: str
:param date: 查询年份
:type date: str
:return: 重大变动
:rtype: pandas.DataFrame
"""
indicator_map = {
"累计买入": "1",
"累计卖出": "2",
}
url = "http://fundf10.eastmoney.com/FundArchivesDatas.aspx"
params = {
"type": "zdbd",
"code": symbol,
"zdbd": indicator_map[indicator],
"year": date,
"rt": "0.913877030254846",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
soup = BeautifulSoup(data_json["content"], "lxml")
item_label = [
item.text.split("\xa0\xa0")[1]
for item in soup.find_all("h4", attrs={"class": "t"})
]
    big_df = pd.DataFrame()
from sapextractor.database_connection.interface import DatabaseConnection
from sapextractor.utils.string_matching import find_corr
import pandas as pd
from getpass import getpass
from sapextractor.utils import constants
import time
class OracleConnection(DatabaseConnection):
def __init__(self, hostname="127.0.0.1", port="1521", sid="xe", username="system", password="<PASSWORD>"):
import cx_Oracle
self.TIMESTAMP_FORMAT = "%Y%m%d %H%M%S"
self.DATE_FORMAT_INTERNAL = "%Y%m%d"
self.HOUR_FORMAT_INTERNAL = "%H%M%S"
self.DATE_FORMAT = "%Y%m%d"
constants.TIMESTAMP_FORMAT = self.TIMESTAMP_FORMAT
constants.DATE_FORMAT_INTERNAL = self.DATE_FORMAT_INTERNAL
constants.HOUR_FORMAT_INTERNAL = self.HOUR_FORMAT_INTERNAL
constants.DATE_FORMAT = self.DATE_FORMAT
self.table_prefix = "SAPSR3."
self.con = cx_Oracle.connect(username, password, hostname + ":" + str(port) + "/" + str(sid), encoding="UTF-8",
events=True)
DatabaseConnection.__init__(self)
def execute_read_sql(self, sql, columns):
cursor = self.con.cursor()
cursor.prefetchrows = constants.ORACLE_ARRAYSIZE
cursor.arraysize = constants.ORACLE_ARRAYSIZE
print(time.time(), "executing: "+sql)
cursor.execute(sql)
stream = []
df = []
while True:
res = cursor.fetchmany(10000)
if len(res) == 0:
break
for row in res:
el = {}
for idx, col in enumerate(columns):
el[col] = row[idx]
stream.append(el)
this_dataframe = pd.DataFrame(stream)
df.append(this_dataframe)
stream = None
stream = []
if df:
df = pd.concat(df)
else:
            df = pd.DataFrame({x: [] for x in columns})
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from tkinter import filedialog
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from scipy.interpolate import make_interp_spline, BSpline
from mpldatacursor import datacursor
from matplotlib import style
from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from bisect import bisect_left
from scipy import interpolate
import math
import matplotlib.pyplot as plt
import matplotlib
import tkinter as tk
import pandas as pd
import glob
import numpy as np
import matplotlib.pylab as pylab
from scipy.optimize import root_scalar
params = {'legend.fontsize': 'x-large',
'figure.figsize': (15, 5),
'axes.labelsize': 'xx-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
matplotlib.use('Qt5Agg')
style.use("ggplot")
def dBm2W(dBm):
return 10**(dBm/10)
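# Note (added): dBm2W returns 10**(x/10), i.e. a linear power ratio; for a true
# dBm input the result is in milliwatts. In EstimateGain below it is applied to
# S21 values in dB, yielding a dimensionless power ratio for the gain estimate.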
def graficoBunito(x, y, points):
xnew = np.linspace(x.min(), x.max(), int(points))
spl = make_interp_spline(x, y, k=3) #BSpline object
ynew = spl(xnew)
return xnew, ynew, spl
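# graficoBunito ("pretty plot") resamples (x, y) onto a denser grid with a cubic
# B-spline so plotted curves look smooth; the returned spline object can also be
# evaluated at arbitrary points (e.g. to interpolate measured cable losses).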
class Ui_MainWindow(QtWidgets.QMainWindow):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1280, 720)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_3 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_3.setObjectName("gridLayout_3")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setEnabled(True)
self.tabWidget.setFocusPolicy(QtCore.Qt.NoFocus)
self.tabWidget.setObjectName("tabWidget")
self.diagRad = QtWidgets.QWidget()
self.diagRad.setObjectName("diagRad")
self.gridLayout_2 = QtWidgets.QGridLayout(self.diagRad)
self.gridLayout_2.setObjectName("gridLayout_2")
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.diagRad)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.label_2 = QtWidgets.QLabel(self.diagRad)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.folderPath = QtWidgets.QLineEdit(self.diagRad)
self.folderPath.setEnabled(True)
self.folderPath.setReadOnly(True)
self.folderPath.setObjectName("folderPath")
self.verticalLayout_2.addWidget(self.folderPath)
self.folderPath_4 = QtWidgets.QLineEdit(self.diagRad)
self.folderPath_4.setReadOnly(True)
self.folderPath_4.setClearButtonEnabled(False)
self.folderPath_4.setObjectName("folderPath_4")
self.verticalLayout_2.addWidget(self.folderPath_4)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.verticalLayout_7 = QtWidgets.QVBoxLayout()
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.browseFolder = QtWidgets.QPushButton(self.diagRad)
self.browseFolder.setObjectName("browseFolder")
self.verticalLayout_7.addWidget(self.browseFolder)
self.browseFolder_4 = QtWidgets.QPushButton(self.diagRad)
self.browseFolder_4.setObjectName("browseFolder_4")
self.verticalLayout_7.addWidget(self.browseFolder_4)
self.horizontalLayout.addLayout(self.verticalLayout_7)
self.verticalLayout_8.addLayout(self.horizontalLayout)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_freq = QtWidgets.QLabel(self.diagRad)
self.label_freq.setObjectName("label_freq")
self.horizontalLayout_2.addWidget(self.label_freq)
self.cb_frequency_4 = QtWidgets.QComboBox(self.diagRad)
self.cb_frequency_4.setObjectName("cb_frequency_4")
self.horizontalLayout_2.addWidget(self.cb_frequency_4)
self.horizontalLayout_4.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_what_plot = QtWidgets.QLabel(self.diagRad)
self.label_what_plot.setObjectName("label_what_plot")
self.horizontalLayout_3.addWidget(self.label_what_plot)
self.cb_what_plot = QtWidgets.QComboBox(self.diagRad)
self.cb_what_plot.setObjectName("cb_what_plot")
self.horizontalLayout_3.addWidget(self.cb_what_plot)
self.horizontalLayout_4.addLayout(self.horizontalLayout_3)
self.saveCsv = QtWidgets.QPushButton(self.diagRad)
self.saveCsv.setObjectName("saveCsv")
self.horizontalLayout_4.addWidget(self.saveCsv)
self.verticalLayout_8.addLayout(self.horizontalLayout_4)
self.gridLayout_2.addLayout(self.verticalLayout_8, 0, 0, 1, 1)
'''
self.graphicsView = QtWidgets.QGraphicsView(self.diagRad)
self.graphicsView.setObjectName("graphicsView")
'''
self.canvas = FigureCanvas(Figure(figsize=(7, 7)))
self.ax = self.canvas.figure.add_subplot(111, polar=True)
self.ax.set_theta_zero_location("N")
self.ax.autoscale(enable = False)
self.ax.set_rmax(-15)
self.ax.set_rmin(-45)
self.gridLayout_2.addWidget(self.canvas, 1, 0, 1, 1)
self.toolbar = NavigationToolbar(self.canvas, self)
self.gridLayout_2.addWidget(self.toolbar, 2, 0, 1, 1)
self.splitter = QtWidgets.QSplitter(self.diagRad)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.normalize = QtWidgets.QCheckBox(self.splitter)
self.normalize.setObjectName("normalize")
self.hold = QtWidgets.QCheckBox(self.splitter)
self.hold.setObjectName("hold")
self.clearBtn_2 = QtWidgets.QPushButton(self.splitter)
self.clearBtn_2.setObjectName("clearBtn_2")
self.gridLayout_2.addWidget(self.splitter, 3, 0, 1, 1)
self.tabWidget.addTab(self.diagRad, "")
self.dist = QtWidgets.QWidget()
self.dist.setObjectName("dist")
self.gridLayout_4 = QtWidgets.QGridLayout(self.dist)
self.gridLayout_4.setObjectName("gridLayout_4")
self.horizontalLayout_25 = QtWidgets.QHBoxLayout()
self.horizontalLayout_25.setObjectName("horizontalLayout_25")
self.horizontalLayout_26 = QtWidgets.QHBoxLayout()
self.horizontalLayout_26.setObjectName("horizontalLayout_26")
self.label_13 = QtWidgets.QLabel(self.dist)
self.label_13.setObjectName("label_13")
self.horizontalLayout_26.addWidget(self.label_13)
self.folderPath_2 = QtWidgets.QLineEdit(self.dist)
self.folderPath_2.setObjectName("folderPath_2")
self.folderPath_2.setReadOnly(True)
self.horizontalLayout_26.addWidget(self.folderPath_2)
self.horizontalLayout_25.addLayout(self.horizontalLayout_26)
self.browseFolder_2 = QtWidgets.QPushButton(self.dist)
self.browseFolder_2.setObjectName("browseFolder_2")
self.horizontalLayout_25.addWidget(self.browseFolder_2)
self.gridLayout_4.addLayout(self.horizontalLayout_25, 0, 0, 1, 1)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName("horizontalLayout_27")
self.label_14 = QtWidgets.QLabel(self.dist)
self.label_14.setObjectName("label_14")
self.horizontalLayout_27.addWidget(self.label_14)
self.cb_frequency_2 = QtWidgets.QComboBox(self.dist)
self.cb_frequency_2.setObjectName("cb_frequency_2")
self.horizontalLayout_27.addWidget(self.cb_frequency_2)
self.horizontalLayout_5.addLayout(self.horizontalLayout_27)
self.horizontalLayout_28 = QtWidgets.QHBoxLayout()
self.horizontalLayout_28.setObjectName("horizontalLayout_28")
self.label_15 = QtWidgets.QLabel(self.dist)
self.label_15.setObjectName("label_15")
self.horizontalLayout_28.addWidget(self.label_15)
self.cb_what_plot_2 = QtWidgets.QComboBox(self.dist)
self.cb_what_plot_2.setObjectName("cb_what_plot_2")
self.horizontalLayout_28.addWidget(self.cb_what_plot_2)
self.horizontalLayout_5.addLayout(self.horizontalLayout_28)
self.saveCsv_2 = QtWidgets.QPushButton(self.dist)
self.saveCsv_2.setObjectName("saveCsv_2")
self.horizontalLayout_5.addWidget(self.saveCsv_2)
self.gridLayout_4.addLayout(self.horizontalLayout_5, 1, 0, 1, 1)
self.canvas_2 = FigureCanvas(Figure(figsize=(7, 7)))
self.ax_2 = self.canvas_2.figure.add_subplot(111)
self.gridLayout_4.addWidget(self.canvas_2, 2, 0, 1, 1)
self.toolbar_2 = NavigationToolbar(self.canvas_2, self)
self.gridLayout_4.addWidget(self.toolbar_2, 3, 0, 1 ,1)
self.splitter_4 = QtWidgets.QSplitter(self.dist)
self.splitter_4.setOrientation(QtCore.Qt.Horizontal)
self.splitter_4.setObjectName("splitter_4")
self.normalize_2 = QtWidgets.QCheckBox(self.splitter_4)
self.normalize_2.setObjectName("normalize_2")
self.hold_2 = QtWidgets.QCheckBox(self.splitter_4)
self.hold_2.setObjectName("hold_2")
self.clearBtn_3 = QtWidgets.QPushButton(self.splitter_4)
self.clearBtn_3.setObjectName("clearBtn_3")
self.gridLayout_4.addWidget(self.splitter_4, 4, 0, 1, 1)
self.tabWidget.addTab(self.dist, "")
self.perdas = QtWidgets.QWidget()
self.perdas.setObjectName("perdas")
self.gridLayout_5 = QtWidgets.QGridLayout(self.perdas)
self.gridLayout_5.setObjectName("gridLayout_5")
self.verticalLayout_15 = QtWidgets.QVBoxLayout()
self.verticalLayout_15.setObjectName("verticalLayout_15")
self.verticalLayout_16 = QtWidgets.QVBoxLayout()
self.verticalLayout_16.setObjectName("verticalLayout_16")
self.verticalLayout_17 = QtWidgets.QVBoxLayout()
self.verticalLayout_17.setObjectName("verticalLayout_17")
self.horizontalLayout_31 = QtWidgets.QHBoxLayout()
self.horizontalLayout_31.setObjectName("horizontalLayout_31")
self.horizontalLayout_32 = QtWidgets.QHBoxLayout()
self.horizontalLayout_32.setObjectName("horizontalLayout_32")
self.label_16 = QtWidgets.QLabel(self.perdas)
self.label_16.setObjectName("label_16")
self.horizontalLayout_32.addWidget(self.label_16)
self.folderPath_3 = QtWidgets.QLineEdit(self.perdas)
self.folderPath_3.setObjectName("folderPath_3")
self.folderPath_3.setReadOnly(True)
self.horizontalLayout_32.addWidget(self.folderPath_3)
self.horizontalLayout_31.addLayout(self.horizontalLayout_32)
self.browseFolder_3 = QtWidgets.QPushButton(self.perdas)
self.browseFolder_3.setObjectName("browseFolder_3")
self.horizontalLayout_31.addWidget(self.browseFolder_3)
self.verticalLayout_17.addLayout(self.horizontalLayout_31)
self.verticalLayout_16.addLayout(self.verticalLayout_17)
self.canvas_3 = FigureCanvas(Figure(figsize=(7, 7)))
self.ax_3 = self.canvas_3.figure.add_subplot(111)
self.verticalLayout_16.addWidget(self.canvas_3)
self.toolbar_3 = NavigationToolbar(self.canvas_3, self)
self.verticalLayout_16.addWidget(self.toolbar_3)
self.verticalLayout_15.addLayout(self.verticalLayout_16)
self.splitter_5 = QtWidgets.QSplitter(self.perdas)
self.splitter_5.setOrientation(QtCore.Qt.Horizontal)
self.splitter_5.setObjectName("splitter_5")
self.verticalLayout_15.addWidget(self.splitter_5)
self.gridLayout_5.addLayout(self.verticalLayout_15, 0, 0, 1, 1)
self.tabWidget.addTab(self.perdas, "")
self.tab = QtWidgets.QWidget()
self.tab.setEnabled(True)
self.tab.setObjectName("tab")
self.gridLayout_7 = QtWidgets.QGridLayout(self.tab)
self.gridLayout_7.setObjectName("gridLayout_7")
self.splitter_2 = QtWidgets.QSplitter(self.tab)
self.splitter_2.setOrientation(QtCore.Qt.Horizontal)
self.splitter_2.setObjectName("splitter_2")
self.layoutWidget = QtWidgets.QWidget(self.splitter_2)
self.layoutWidget.setObjectName("layoutWidget")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.layoutWidget)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_3 = QtWidgets.QLabel(self.layoutWidget)
self.label_3.setObjectName("label_3")
self.verticalLayout_4.addWidget(self.label_3)
self.label_4 = QtWidgets.QLabel(self.layoutWidget)
self.label_4.setObjectName("label_4")
self.verticalLayout_4.addWidget(self.label_4)
self.label_freq_2 = QtWidgets.QLabel(self.layoutWidget)
self.label_freq_2.setObjectName("label_freq_2")
self.verticalLayout_4.addWidget(self.label_freq_2)
self.horizontalLayout_8.addLayout(self.verticalLayout_4)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.folderPath_5 = QtWidgets.QLineEdit(self.layoutWidget)
self.folderPath_5.setMinimumSize(QtCore.QSize(81, 0))
self.folderPath_5.setObjectName("folderPath_5")
self.folderPath_5.setReadOnly(True)
self.verticalLayout_5.addWidget(self.folderPath_5)
self.folderPath_6 = QtWidgets.QLineEdit(self.layoutWidget)
self.folderPath_6.setMinimumSize(QtCore.QSize(81, 20))
self.folderPath_6.setObjectName("folderPath_6")
self.folderPath_6.setReadOnly(True)
self.verticalLayout_5.addWidget(self.folderPath_6)
self.cb_frequency_3 = QtWidgets.QComboBox(self.layoutWidget)
self.cb_frequency_3.setMinimumSize(QtCore.QSize(81, 20))
self.cb_frequency_3.setObjectName("cb_frequency_3")
self.verticalLayout_5.addWidget(self.cb_frequency_3)
self.horizontalLayout_8.addLayout(self.verticalLayout_5)
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.browseFolder_6 = QtWidgets.QPushButton(self.layoutWidget)
self.browseFolder_6.setObjectName("browseFolder_6")
self.verticalLayout_6.addWidget(self.browseFolder_6)
self.browseFolder_5 = QtWidgets.QPushButton(self.layoutWidget)
self.browseFolder_5.setObjectName("browseFolder_5")
self.verticalLayout_6.addWidget(self.browseFolder_5)
self.saveCsv_3 = QtWidgets.QPushButton(self.layoutWidget)
self.saveCsv_3.setObjectName("saveCsv_3")
self.verticalLayout_6.addWidget(self.saveCsv_3)
self.horizontalLayout_8.addLayout(self.verticalLayout_6)
self.line = QtWidgets.QFrame(self.splitter_2)
self.line.setMaximumSize(QtCore.QSize(3, 16777215))
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.widget = QtWidgets.QWidget(self.splitter_2)
self.widget.setObjectName("widget")
self.gridLayout_6 = QtWidgets.QGridLayout(self.widget)
self.gridLayout_6.setContentsMargins(0, 0, 0, 0)
self.gridLayout_6.setObjectName("gridLayout_6")
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.GainCheckBox = QtWidgets.QCheckBox(self.widget)
self.GainCheckBox.setObjectName("GainCheckBox")
self.verticalLayout_12.addWidget(self.GainCheckBox)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_5 = QtWidgets.QLabel(self.widget)
self.label_5.setObjectName("label_5")
self.horizontalLayout_7.addWidget(self.label_5)
self.cb_Gain_1 = QtWidgets.QComboBox(self.widget)
self.cb_Gain_1.setMinimumSize(QtCore.QSize(81, 20))
self.cb_Gain_1.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.cb_Gain_1.setObjectName("cb_Gain_1")
self.horizontalLayout_7.addWidget(self.cb_Gain_1)
self.verticalLayout_12.addLayout(self.horizontalLayout_7)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setObjectName("label_6")
self.horizontalLayout_6.addWidget(self.label_6)
self.cb_Gain_2 = QtWidgets.QComboBox(self.widget)
self.cb_Gain_2.setMinimumSize(QtCore.QSize(81, 20))
self.cb_Gain_2.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.cb_Gain_2.setObjectName("cb_Gain_2")
self.horizontalLayout_6.addWidget(self.cb_Gain_2)
self.verticalLayout_12.addLayout(self.horizontalLayout_6)
self.gridLayout_6.addLayout(self.verticalLayout_12, 0, 0, 1, 1)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_10 = QtWidgets.QLabel(self.widget)
self.label_10.setText("")
self.label_10.setObjectName("label_10")
self.verticalLayout_3.addWidget(self.label_10)
self.label_7 = QtWidgets.QLabel(self.widget)
self.label_7.setObjectName("label_7")
self.verticalLayout_3.addWidget(self.label_7)
self.line_Gain_Output = QtWidgets.QLineEdit(self.widget)
self.line_Gain_Output.setMinimumSize(QtCore.QSize(81, 20))
self.line_Gain_Output.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.line_Gain_Output.setObjectName("line_Gain_Output")
self.line_Gain_Output.setReadOnly(True)
self.verticalLayout_3.addWidget(self.line_Gain_Output)
self.gridLayout_6.addLayout(self.verticalLayout_3, 0, 1, 1, 1)
self.gridLayout_7.addWidget(self.splitter_2, 0, 0, 1, 1)
self.canvas_4 = FigureCanvas(Figure(figsize=(7, 7)))
self.ax_4 = self.canvas_4.figure.add_subplot(111)
self.gridLayout_7.addWidget(self.canvas_4, 1, 0, 1, 1)
self.toolbar_4 = NavigationToolbar(self.canvas_4, self)
self.gridLayout_7.addWidget(self.toolbar_4, 2, 0, 1, 1)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.normalize_3 = QtWidgets.QCheckBox(self.tab)
self.normalize_3.setObjectName("normalize_3")
self.gridLayout.addWidget(self.normalize_3, 3, 0, 1, 1)
self.hold_3 = QtWidgets.QCheckBox(self.tab)
self.hold_3.setObjectName("hold_3")
self.gridLayout.addWidget(self.hold_3, 3, 1, 1, 1)
self.clearBtn_4 = QtWidgets.QPushButton(self.tab)
self.clearBtn_4.setObjectName("clearBtn_4")
self.gridLayout.addWidget(self.clearBtn_4, 3, 2, 1, 1)
self.gridLayout_7.addLayout(self.gridLayout, 3, 0, 1, 1)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.gridLayout_9 = QtWidgets.QGridLayout(self.tab_2)
self.gridLayout_9.setObjectName("gridLayout_9")
self.gridLayout_8 = QtWidgets.QGridLayout()
self.gridLayout_8.setObjectName("gridLayout_8")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.verticalLayout_9 = QtWidgets.QVBoxLayout()
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.label_8 = QtWidgets.QLabel(self.tab_2)
self.label_8.setMaximumSize(QtCore.QSize(52, 16))
self.label_8.setObjectName("label_8")
self.verticalLayout_9.addWidget(self.label_8)
self.label_9 = QtWidgets.QLabel(self.tab_2)
self.label_9.setMaximumSize(QtCore.QSize(52, 16))
self.label_9.setObjectName("label_9")
self.verticalLayout_9.addWidget(self.label_9)
self.label_12 = QtWidgets.QLabel(self.tab_2)
self.label_12.setObjectName("label_12")
self.label_12.setMaximumSize(QtCore.QSize(52, 16))
self.verticalLayout_9.addWidget(self.label_12)
self.horizontalLayout_9.addLayout(self.verticalLayout_9)
self.verticalLayout_10 = QtWidgets.QVBoxLayout()
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.line_med1 = QtWidgets.QLineEdit(self.tab_2)
self.line_med1.setObjectName("line_med1")
self.verticalLayout_10.addWidget(self.line_med1)
self.line_med2 = QtWidgets.QLineEdit(self.tab_2)
self.line_med2.setObjectName("line_med2")
self.verticalLayout_10.addWidget(self.line_med2)
self.line_perdas = QtWidgets.QLineEdit(self.tab_2)
self.line_perdas.setObjectName("line_perdas")
self.verticalLayout_10.addWidget(self.line_perdas)
self.horizontalLayout_9.addLayout(self.verticalLayout_10)
self.verticalLayout_11 = QtWidgets.QVBoxLayout()
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.estimate_gain_1_btn = QtWidgets.QPushButton(self.tab_2)
self.estimate_gain_1_btn.setMaximumSize(QtCore.QSize(75, 20))
self.estimate_gain_1_btn.setObjectName("estimate_gain_1_btn")
self.verticalLayout_11.addWidget(self.estimate_gain_1_btn)
self.estimate_gain_2_btn = QtWidgets.QPushButton(self.tab_2)
self.estimate_gain_2_btn.setMaximumSize(QtCore.QSize(75, 20))
self.estimate_gain_2_btn.setObjectName("estimate_gain_2_btn")
self.verticalLayout_11.addWidget(self.estimate_gain_2_btn)
self.estimate_gain_3_btn = QtWidgets.QPushButton(self.tab_2)
self.estimate_gain_3_btn.setMaximumSize(QtCore.QSize(75, 20))
self.estimate_gain_3_btn.setObjectName("estimate_gain_3_btn")
self.verticalLayout_11.addWidget(self.estimate_gain_3_btn)
self.horizontalLayout_9.addLayout(self.verticalLayout_11)
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.label_11 = QtWidgets.QLabel(self.tab_2)
self.label_11.setObjectName("label_11")
self.verticalLayout_13.addWidget(self.label_11)
self.gainEstimateFrequency = QtWidgets.QComboBox(self.tab_2)
self.gainEstimateFrequency.setObjectName("gainEstimateFrequency")
self.verticalLayout_13.addWidget(self.gainEstimateFrequency)
self.horizontalLayout_9.addLayout(self.verticalLayout_13)
self.gridLayout_8.addLayout(self.horizontalLayout_9, 0, 0, 1, 1)
self.canvas_5 = FigureCanvas(Figure(figsize=(7, 7)))
self.ax_5 = self.canvas_5.figure.add_subplot(111)
self.gridLayout_8.addWidget(self.canvas_5, 1, 0, 1, 1)
self.toolbar_5 = NavigationToolbar(self.canvas_5, self)
self.gridLayout_8.addWidget(self.toolbar_5, 2, 0, 1, 1)
'''
self.graphicsView_estimativa = QtWidgets.QGraphicsView(self.tab_2)
self.graphicsView_estimativa.setObjectName("graphicsView_estimativa")
self.gridLayout_8.addWidget(self.graphicsView_estimativa, 1, 0, 1, 1)
'''
self.gridLayout_9.addLayout(self.gridLayout_8, 0, 0, 1, 1)
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout_3.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 782, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtWidgets.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.actionHelp = QtWidgets.QAction(MainWindow)
self.actionHelp.setObjectName("actionHelp")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.menuFile.addAction(self.actionQuit)
self.menuHelp.addAction(self.actionHelp)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.browseFolder.clicked.connect(self.load_csv)
self.browseFolder_2.clicked.connect(self.load_csv_2)
self.browseFolder_3.clicked.connect(self.load_csv_file)
self.browseFolder_4.clicked.connect(self.load_csv_file_3)
self.browseFolder_5.clicked.connect(self.load_csv_file_2)
self.browseFolder_6.clicked.connect(self.load_csv_3)
self.clearBtn_2.clicked.connect(self.clear_plot)
self.clearBtn_3.clicked.connect(self.clear_plot_3)
self.clearBtn_4.clicked.connect(self.clear_plot_2)
self.saveCsv.clicked.connect(self.save_csv)
self.saveCsv_2.clicked.connect(self.save_csv_2)
self.saveCsv_3.clicked.connect(self.save_csv_3)
self.cb_frequency_4.activated.connect(self.update_plot)
self.cb_frequency_2.activated.connect(self.update_plot_2)
self.cb_frequency_3.activated.connect(self.update_plot_3)
self.cb_what_plot.activated.connect(self.what_plot)
self.cb_what_plot_2.activated.connect(self.what_plot_2)
self.GainCheckBox.stateChanged.connect(self.GainEstimateEnabled)
self.cb_Gain_1.activated.connect(self.GainEstimate)
self.cb_Gain_2.activated.connect(self.GainEstimate)
self.GainEstimateEnabled = False
self.estimate_gain_1_btn.clicked.connect(self.LoadGainMeasurement1)
self.estimate_gain_2_btn.clicked.connect(self.LoadGainMeasurement2)
self.estimate_gain_3_btn.clicked.connect(self.LoadGainLossMeasurement)
self.gainEstimateFrequency.activated.connect(self.EstimateGain)
self.folderLoaded = False
self.folderLoaded_2 = False
self.lossLoaded = False
self.lossLoaded_perda = False
self.med1Loaded = False
self.med2Loaded = False
self.medPerdaLoaded = False
self.scatGain = False
def EstimateGain(self):
if not self.med1Loaded:
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Medição 1 não foi carregada corretamente!"),
QtWidgets.QMessageBox.Ok)
elif not self.med2Loaded:
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Medição 2 não foi carregada corretamente!"),
QtWidgets.QMessageBox.Ok)
elif not self.medPerdaLoaded:
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Medição de Perdas não foi carregada corretamente!"),
QtWidgets.QMessageBox.Ok)
else:
def func(k):
return G1dB*(1 - math.exp(-k*float(D2))) - G2dB*(1 - math.exp(-k*float(D1)))
def Alfredo(k, gain, x):
return gain*(1 - np.exp(-k*x))
D1 = self.GainMed1_path.name.replace('.CSV', '')[-3:]
D2 = self.GainMed2_path.name.replace('.CSV', '')[-3:]
desFreq = round(float(self.gainEstimateFrequency.currentText())*1e9)
D1S21 = self.GainMed1[self.GainMed1.Frequency == float(desFreq)].S21.values[0]
D2S21 = self.GainMed2[self.GainMed2.Frequency == float(desFreq)].S21.values[0]
#D1S21 = S21D1[S21D1.Distancia == float(D1)].S21.values[0]
#D2S21 = S21D2[S21D2.Distancia == float(D2)].S21.values[0]
D1 = float(D1)/100
D2 = float(D2)/100
perda = self.funcaoPerdaGain(desFreq/1e9)
D1S21W = dBm2W(D1S21 - perda)
D2S21W = dBm2W(D2S21 - perda)
lmbda = 3e8/desFreq
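            # Friis-type estimate assuming identical antennas at both ends:
            # P_r/P_t = G^2 * (lambda / (4*pi*d))^2  =>  G = sqrt(P_r/P_t) * 4*pi*d / lambda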
G1 = np.sqrt(D1S21W)*(4*np.pi*float(D1))/lmbda
G2 = np.sqrt(D2S21W)*(4*np.pi*float(D2))/lmbda
if float(D1) != 0.0 and float(D2) != 0.0 and D1 != D2:
G1dB = 10*np.log10(G1)
G2dB = 10*np.log10(G2)
if self.scatGain:
print('Tem Scat', self.scatGain)
self.scatGain.remove()
#self.approxGain.pop(0).remove()
self.canvas_5.draw_idle()
self.scatGain = self.ax_5.scatter([float(D1)*100, float(D2)*100], [G1dB, G2dB], label='Medições')
print(self.scatGain)
self.canvas_5.draw_idle()
#print(f'\nOrigi = {D1S21}, perda = {perda}, S21 = {D1S21 - perda}, S21W = {D1S21W}, dist = {D1}, ganho = {G1dB}')
#print(f'Origi = {D2S21}, perda = {perda},S21 = {D2S21 - perda}, S21W = {D2S21W}, dist = {D2}, ganho = {G2dB}')
kmax = [0.1, 1000]
try:
sol = root_scalar(func, method='toms748', bracket = kmax)
k = sol.root
Gcd = G1dB/(1-math.exp(-k*float(D1)))
print(f'k = {k}, Gcd = {Gcd}')
x2 = np.arange(0, 6, 0.10)
self.approxGain = self.ax_5.plot(x2*100, Alfredo(k, Gcd, x2), label=f'G = {round(Gcd,2)} dB')
legenda = self.ax_5.legend(bbox_to_anchor=(0, 1.02, 1, .102), borderaxespad=0, loc="right")
legenda.set_draggable(True)
except:
pass
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Estimativa Erro"),
QtWidgets.qApp.tr("Não foi possível achar uma solução para k = [0.1, 1000]"),
QtWidgets.QMessageBox.Ok)
def LoadGainMeasurement1(self):
root = tk.Tk()
root.withdraw()
self.GainMed1_path = filedialog.askopenfile()
try:
self.GainMed1= pd.read_csv(self.GainMed1_path, header=2, engine='python')
self.line_med1.setText(self.GainMed1_path.name)
dist1 = self.GainMed1_path.name.replace('.CSV', '')[-3:]
self.GainMed1.rename(columns = {self.GainMed1.columns[1]: 'S21', self.GainMed1.columns[2]: 'Phase'}, inplace = True)
self.gainFreq1 = self.GainMed1.Frequency.unique()/1e9
print(f'Frequências 1 = {self.gainFreq1}')
# self.freq_loss = self.df_4.iloc[:,0]/1e9
#self.loss = self.df_4.iloc[:,1]
#nada, fon, self.funcao_perda = graficoBunito(self.freq_loss, self.loss, self.freq_loss.size*3)
self.med1Loaded = True
except:
pass
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Erro ao abrir Medição 1!"),
QtWidgets.QMessageBox.Ok)
def LoadGainMeasurement2(self):
root = tk.Tk()
root.withdraw()
self.GainMed2_path = filedialog.askopenfile()
try:
self.GainMed2= pd.read_csv(self.GainMed2_path, header=2, engine='python')
self.line_med2.setText(self.GainMed2_path.name)
dist1 = self.GainMed2_path.name.replace('.CSV', '')[-3:]
self.GainMed2.rename(columns = {self.GainMed2.columns[1]: 'S21', self.GainMed2.columns[2]: 'Phase'}, inplace = True)
self.gainFreq2 = self.GainMed2.Frequency.unique()/1e9
print(f'Frequências 1 = {self.gainFreq2}')
# self.freq_loss = self.df_4.iloc[:,0]/1e9
#self.loss = self.df_4.iloc[:,1]
#nada, fon, self.funcao_perda = graficoBunito(self.freq_loss, self.loss, self.freq_loss.size*3)
self.med2Loaded = True
except:
pass
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("Erro ao abrir Medição 2!"),
QtWidgets.QMessageBox.Ok)
if self.med1Loaded and self.med2Loaded:
print('Ambas Medições Carregadas')
if np.array_equal(self.gainFreq1, self.gainFreq2):
self.gainEstimateFrequency.clear()
self.gainEstimateFrequency.addItems([str(freq) for freq in self.gainFreq1])
else:
QtWidgets.QMessageBox.information(None, QtWidgets.qApp.tr("Open File"),
QtWidgets.qApp.tr("As medições não possuem o mesmo range de frequências medidas!"),
QtWidgets.QMessageBox.Ok)
def LoadGainLossMeasurement(self):
root = tk.Tk()
root.withdraw()
self.gainPerdaPath = filedialog.askopenfile()
try:
            self.gainPerda = pd.read_csv(self.gainPerdaPath, header=2, engine='python')
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes constant values that are used
throughout PUDL to populate static lists within the data packages or for data
cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
     'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
     'mw days/therm', 'mw days (th', 'ermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized. Solar and
# Solar Project were not classified, as these do not indicate whether they
# are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and
# "steam and gas") were classified based on additional research of the plants
# on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
    'steam (see note 3)', 'mpc 50%share steam', '40% share steam',
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
    'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
    'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
    'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
    'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Aggregate the plant_kind string lists above into a single dictionary keyed
# by the canonical plant kind.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_kind
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by the Climate Policy Initiative with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
    'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
    'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Aggregate the construction_type string lists above into a single dictionary
# keyed by the canonical construction type.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (without the .DBF extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types are left as 'XXX', which will obviously cause an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
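"""tuple: A tuple containing the FERC Form 714 tables that can be successfully
integrated into PUDL.
"""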
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 database table names (values).
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
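"""tuple: A tuple containing the EIA 861 tables that can be successfully
integrated into PUDL.
"""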
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
    (15, '317',
     'Steam production: Asset retirement costs for steam production plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
    (19, '321',
     'Nuclear production: Structures and improvements (Major only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
    (22, '324',
     'Nuclear production: Accessory electric equipment (Major only)'),
    (23, '325',
     'Nuclear production: Miscellaneous power plant equipment (Major only)'),
    (24, '326',
     'Nuclear production: Asset retirement costs for nuclear production plant'
     ' (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
    (34, '337',
     'Hydraulic production: Asset retirement costs for hydraulic production'
     ' plant'),
    (35, 'subtotal_hydraulic_production',
     'Subtotal: Hydraulic Production Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
    (44, '347',
     'Other production: Asset retirement costs for other production plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
    (57, '359.1',
     'Transmission: Asset retirement costs for transmission plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
    (82, '385',
     'Regional transmission: Miscellaneous Regional Transmission and Market'
     ' Operation Plant'),
    (83, '386',
     'Regional transmission: Asset Retirement Costs for Regional Transmission'
     ' and Market Operation Plant'),
    (84, 'subtotal_regional_transmission',
     'Subtotal: Transmission and Market Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region in which a US state is located
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used to indicate where a plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA 923 AER fuel type strings into the same categories
# as 'energy_source_eia923', plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in either short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilites for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes to for reading the CEMS CSVs
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and the
keyword arguments needed to read each of those tables into PUDL (values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the PUDL glue tables that tie the datasets together."""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years for
each data source that are able to be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing table names (keys) and tuples of integer-type
column names (values) whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor identifiers (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'balancing_authority_code': pd.StringDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'contact_firstname': pd.StringDtype(),
'contact_firstname2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'current_planned_operating_date': 'datetime64[ns]',
'deliver_power_transgrid': pd.BooleanDtype(),
'duct_burners': pd.BooleanDtype(),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.StringDtype(),
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'longitude': float,
'mercury_content_ppm': float,
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.StringDtype(),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'original_planned_operating_date': 'datetime64[ns]',
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'other_combustion_tech': pd.BooleanDtype(),
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
        'owner_utility_id_eia': pd.Int64Dtype(),
    },
}
import yfinance as yf
import pandas as pd
import sys
from datetime import datetime
class FinanceAnalysis:
def analyze(self, best_n=25):
# Load data from file, generate data by running the `ticker_counts.py` script
date_created = datetime.today().strftime('%Y-%m-%d')
csv_filename = f"{date_created}_tick_df"
data_directory = "./data"
full_input_path = f"{data_directory}/{csv_filename}.csv"
tick_df = pd.read_csv(full_input_path).sort_values(by=["Mentions", "Ticker"], ascending=False)
        tick_df = tick_df.dropna(axis=1)
dataColumns = ["Name", "Industry", "Previous Close", "5d Low", "5d High", "1d Change (%)", "5d Change (%)",
"1mo Change (%)"]
        df_best = tick_df.head(best_n).copy()
df_best[dataColumns] = df_best.Ticker.apply(self.get_ticker_info)
# Save to file to load into yahoo analysis script
csv_filename = f"df_best_{best_n}"
full_output_path = f"{data_directory}/{csv_filename}.csv"
df_best.to_csv(full_output_path, index=False)
print(df_best.head())
    def calculate_change(self, start: float, end: float) -> float:
        """Return the percentage change from start to end, rounded to two decimal places."""
        return round(((end - start) / start) * 100, 2)
    def get_change(self, ticker: str, period: str = "1d") -> float:
        """Use the Yahoo Finance API to compute the percentage change of a ticker over the given period."""
        history = yf.Ticker(ticker).history(period)
        return self.calculate_change(history["Open"].to_list()[0],
                                     history["Close"].to_list()[-1])
def get_ticker_info(self, ticker):
# Standard Data
info = yf.Ticker(ticker).info
tickerName = info.get("longName")
tickerIndustry = info.get("industry")
# previous Day close
tickerClose = yf.Ticker(ticker).history(period="1d")["Close"].to_list()[-1]
# Highs and Lows
highLow = yf.Ticker(ticker).history(period="5d")
Low5d = min(highLow["Low"].to_list())
High5d = max(highLow["High"].to_list())
# Changes
change1d = self.get_change(ticker)
change5d = self.get_change(ticker, "5d")
change1mo = self.get_change(ticker, "1mo")
        return pd.Series([tickerName, tickerIndustry, tickerClose, Low5d, High5d, change1d, change5d, change1mo])
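# Hedged usage sketch (not part of the original script): run the analysis on
# the 25 most-mentioned tickers, assuming the ticker_counts.py script has
# already produced today's CSV in ./data.
if __name__ == "__main__":
    FinanceAnalysis().analyze(best_n=25)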
import pandas as pd
import numpy as np
from copy import deepcopy
import os
from data.download import DatasetDownloader
import tarfile
import sys
from scipy.interpolate import interp1d
from pyts.visualization import plot_paa
from pyts.transformation import PAA
import pickle
from scipy.spatial.distance import cdist, squareform
from data.DTWThread import DTWThread
import psutil
class Preprocessor:
"""
Class for preprocessing routines on the mobility data set.
"""
# Names of columns in all dataframes. Used to inject columns into empty dataframes.
DATAFRAME_COLUMN_NAMES = {
"cell": ['time', 'cid', 'lac', 'asu'],
"annotation": ['time', 'mode', 'notes'],
"location": ['time', 'gpstime', 'provider', 'longitude', 'latitude', 'altitude', 'speed', 'bearing',
'accuracy'],
"sensor": ['sensor', 'time', 'x', 'y', 'z', 'total'],
"mac": ['time', 'ssid', 'level'],
"marker": ['time', 'marker'],
"event": ['time', 'event', 'state']
}
@staticmethod
def preprocess(tokens, filename: str = None, distance_metric: str = "euclidean", use_individual_columns: bool = False, load_preprocessed: str = None):
"""
Executes all preprocessing steps.
:param tokens: List with keys of tokens to preprocess.
:param filename: Specifies name of file data should be dumped to. Not persisted to disk if specified value is
None. Note that filename is relative; all files are stored in /data/preprocessed.
:param distance_metric: Distance metric to apply for comparison between trip segments.
:param use_individual_columns: Defines whether individual columns (x, y, z) or the total (n2) value should be
used for distance calculation.
        :param load_preprocessed: str, default=None; specifies a path to a pickled
            preprocessed_data.dat file. If this parameter is not None, the
            preprocessing step is skipped and the pickled data is loaded instead.
:return: Dictionary with preprocessed data. Specified tokens are used as keys.
"""
# 1. Preprocess data per token.
if load_preprocessed is not None:
# Load dataframes from disk.
preprocessed_data = Preprocessor.restore_preprocessed_data_from_disk(filename=load_preprocessed)
else:
preprocessed_data = Preprocessor._preprocess_data_per_token(tokens=tokens)
# 2. Cut all trips in 30 second snippets
trips_cut_per_30_sec = Preprocessor.get_cut_trip_snippets_for_targets(
preprocessed_data,
snippet_length=30,
sensor_type="acceleration",
target_column_names=["total", "x", "y", "z"]
)
# 3. Apply distance metric and calculate distance matrix
distance_matrix = None
if distance_metric is not None:
if use_individual_columns:
distance_matrix = Preprocessor.calculate_distance_for_individual_columns(
dataframes=trips_cut_per_30_sec[1:4]
)
else:
distance_matrix = Preprocessor.calculate_distance_for_n2(
trips_cut_per_30_sec[0],
metric=distance_metric
)
# 4. Dump data to file, if requested.
if filename is not None:
Preprocessor.persist_results(
filename=filename,
preprocessed_data=preprocessed_data,
trips_cut_per_30_sec=trips_cut_per_30_sec,
distance_metric=distance_metric,
distance_matrix_n2=distance_matrix,
use_individual_columns=use_individual_columns
)
return preprocessed_data
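    # Hedged usage sketch (kept as a comment so the class body stays valid):
    # a typical end-to-end call might look like
    #
    #   data = Preprocessor.preprocess(
    #       tokens=["<token_a>", "<token_b>"],   # hypothetical token keys
    #       filename="preprocessed_data.dat",
    #       distance_metric="euclidean",
    #   )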
@staticmethod
def _preprocess_data_per_token(tokens: list):
"""
        Preprocesses travel data for each of the specified tokens.
        :param tokens: List of tokens whose data is to be processed.
:return: Dictionary with preprocessed data per token.
"""
preprocessed_data = {}
for token in tokens:
# 1. Get travel data per token, remove dataframes without annotations.
dfs = Preprocessor.replace_none_values_with_empty_dataframes(
# Drop dataframes w/o annotations.
Preprocessor._remove_dataframes_without_annotation(
# Get travel data per token.
Preprocessor.get_data_per_token(token)
)
)
# 2. Remove trips less than 10 minutes long.
dfs = Preprocessor.replace_none_values_with_empty_dataframes(
Preprocessor._remove_dataframes_by_duration_limit(dfs, 10 * 60)
)
# 3. Cut first and last 30 seconds from scripted trips.
dfs = Preprocessor.replace_none_values_with_empty_dataframes(
Preprocessor._cut_off_start_and_end_in_dataframes(
dataframes=dfs, list_of_dataframe_names_to_cut=["sensor", "location"], cutoff_in_seconds=60
)
)
# 4. Perform PAA.
resampled_sensor_values = Preprocessor.replace_none_values_with_empty_dataframes(
Preprocessor.calculate_paa(dfs)
)
# Prepare dictionary with results.
preprocessed_data[token] = resampled_sensor_values
return preprocessed_data
@staticmethod
def persist_results(filename: str, preprocessed_data: dict, trips_cut_per_30_sec: list,
distance_metric: str, distance_matrix_n2: pd.DataFrame, use_individual_columns=False):
"""
Stores preprocessing results on disk.
:param filename:
:param preprocessed_data:
:param trips_cut_per_30_sec:
:param distance_metric:
:param distance_matrix_n2:
:param use_individual_columns: indicates if individual columns were used
:return:
"""
data_dir = DatasetDownloader.get_data_dir()
preprocessed_path = os.path.join(data_dir, "preprocessed")
# make sure the directory exists
DatasetDownloader.setup_directory(preprocessed_path)
full_path = os.path.join(preprocessed_path, filename)
with open(full_path, "wb") as file:
file.write(pickle.dumps(preprocessed_data))
trips_cut_per_30_sec[0].to_csv(full_path[:-4] + "_total.csv", sep=";", index=False)
trips_cut_per_30_sec[1].to_csv(full_path[:-4] + "_x.csv", sep=";", index=False)
trips_cut_per_30_sec[2].to_csv(full_path[:-4] + "_y.csv", sep=";", index=False)
trips_cut_per_30_sec[3].to_csv(full_path[:-4] + "_z.csv", sep=";", index=False)
if distance_metric is not None:
if use_individual_columns:
distance_matrix_n2_path = full_path[:-4] + "_" + "individual" + "_" + distance_metric + "_xyz" +".csv"
else:
distance_matrix_n2_path = full_path[:-4] + "_" + distance_metric + ".csv"
distance_matrix_n2.to_csv(distance_matrix_n2_path, sep=";", index=False)
@staticmethod
def replace_none_values_with_empty_dataframes(dataframe_dicts: list):
"""
Checks every dictionary in every dictionary in specified list for None values, replaces them with empty data-
frames.
:param dataframe_dicts: List of dictionaries containing one dataframe for each key.
:return: List in same format with Nones replaced by empty dataframes.
"""
# For every key in every dictionary in list: Create new dictionary with Nones replaced by empty dataframes;
# concatenate new dictionaries to list.
return [
{
key: pd.DataFrame(columns=Preprocessor.DATAFRAME_COLUMN_NAMES[key])
if df_dict[key] is None else df_dict[key]
for key in df_dict
} for df_dict in dataframe_dicts
]
@staticmethod
def get_cut_trip_snippets_for_targets(dfs, target_column_names: list, snippet_length=30, sensor_type="acceleration"):
"""
This method gets a dictionary of trips per token and cuts them in the
specified snippet_length. It uses the columns of the specified names
(i. e. one or several of: "total", "x", "y", "z") in the sensor table.
Parameters
----------
dfs: dictionary with the assumed nested structure
dict[token] = list of trips per token and each trip consists of tables for
at least "annotation" and "sensor"
snippet_length: int, default=30,
specifies the length of the time snippets in seconds
sensor_type: string, default="acceleration"
specifies which sensor type should be used for each entry
target_column_names: list
Specifies which columns should represent trip observation.
Returns
-------
result: returns a list of pandas.DataFrames where each row is a snippet with length snippet_length
and each column is one recording step. Each entry corresponds
to the total aka n2 value of the original data. Additional columns are:
"mode","notes","scripted","token","trip_id", where scripted is a binary variable
where scripted=1 and ordinary=0. "trip_id" helps to identify which snippet, belongs
to which trip.
Each element in the list corresponds to one of the specified target columns (in the same sequence).
"""
return [
Preprocessor.get_cut_trip_snippets_for_target(
dfs=dfs,
snippet_length=snippet_length,
sensor_type=sensor_type,
target_column_name=target_column
)
for target_column in target_column_names
]
@staticmethod
def get_cut_trip_snippets_for_target(dfs, snippet_length=30, sensor_type="acceleration", target_column_name: str = "total"):
"""
This method gets a dictionary of trips per token and cuts them in the
specified snippet_length. It uses the one dimensional column of the specified name
(i. e. one of: "total", "x", "y", "z") in the sensor table.
Parameters
----------
dfs: dictionary with the assumed nested structure
dict[token] = list of trips per token and each trip consists of tables for
at least "annotation" and "sensor"
snippet_length: int, default=30,
specifies the length of the time snippets in seconds
sensor_type: string, default="acceleration"
specifies which sensor type should be used for each entry
target_column_name: string, default="total"
Specifies which column should represent trip observation.
Returns
-------
result: returns a pandas.DataFrame where each row is a snippet with length snippet_length
and each column is one recording step. Each entry corresponds
to the total aka n2 value of the original data. Additional columns are:
"mode","notes","scripted","token","trip_id", where scripted is a binary variable
where scripted=1 and ordinary=0. "trip_id" helps to identify which snippet, belongs
to which trip.
"""
HERTZ_RATE = 20
column_names = ["snippet_"+str(i) for i in range(snippet_length * HERTZ_RATE)]
column_names = column_names + ["mode","notes","scripted","token", "trip_id"]
result = pd.DataFrame(columns=column_names)
trip_index = 0
for token_i, trips in sorted(dfs.items()):
for trip_i in trips:
sensor_data, mode, notes, scripted = Preprocessor._get_row_entries_for_trip(trip_i, sensor_type=sensor_type)
splitted_trip = Preprocessor._cut_trip(
sensor_data=sensor_data,
target_column_name=target_column_name,
snippet_length=snippet_length,
column_names=column_names
)
splitted_trip["mode"]=mode
if str(notes).lower() == "nan":
splitted_trip["notes"]="empty"
else:
splitted_trip["notes"]=notes
splitted_trip["scripted"]=scripted
splitted_trip["token"]=token_i
splitted_trip["trip_id"]=trip_index
trip_index += 1
result = pd.concat([result, splitted_trip])
result.reset_index(drop=True, inplace=True)
return result
@staticmethod
def calculate_distance_for_n2(data, metric="euclidean"):
"""
This method calculates the specified distance metric for norms of the x,y,z signal,
also called n2 or total in the assignment.
Parameters
----------
data: pandas.DataFrame of the trip segments and the
["mode","notes","scripted","token", "trip_id"] columns
metric: string, default="euclidean",
specifies which distance metric method should be used. The distance is calculated
with the highly optimized cdist function of scipy.spatial.distance.
This makes it simple to use a wide variety of distance metrics, some
of them listed below.
Mandatory Distance Calculations:
"euclidean" : calculates the euclidean distance
"cityblock" : calculates the manhattan distance
"cosine" : calculates the cosine distance
"dtw" : Calculates distance with dynamic time warping. Utilizes l1 norm.
for a full list of all distances see:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html
Returns
-------
result: returns a pandas.DataFrame where each each point in the distance matrix
is the distance of one trip segment to another one and each row of the
distance matrix corresponds to the trips segment distances to all other
trip segments. Additional columns are: "mode","notes","scripted","token",
where scripted is a binary variable where scripted=1 and ordinary=0
Note that the dimensionality of the result can be (for most cases)
different to the dimensionality of the incoming data pandas.DataFrame.
"""
categorical_colnames=["mode","notes","scripted","token", "trip_id"]
small_df = data.drop(categorical_colnames, axis=1)
#column_names = list(small_df.columns.values)
nr_of_rows = small_df.shape[0]
nr_of_columns = small_df.shape[1]
# The new dataframe has dimensionality of nr_of_rows x nr_of_rows
column_names = ["distance_"+str(i) for i in range(nr_of_rows)]
result = pd.DataFrame(columns=column_names)
distance_matrix = \
cdist(small_df, small_df, metric=metric) if metric != 'dtw' \
else Preprocessor._calculate_distance_with_dtw(small_df, 1)
result = pd.concat([result, pd.DataFrame(distance_matrix, columns=column_names)])
# Reappend the categorical columns
for colname in categorical_colnames:
result[colname] = data[colname]
return result
@staticmethod
def calculate_distance_for_individual_columns(dataframes: list):
"""
This method calculates the specified distance metric for the individual x, y, z columns.
Note that due to the data structure required for calculating distances between the individual columns currently
only the Euclidean norm is supported, since I haven't found a way to concile scipy's cdist-function with the
additional dimension (individual columns) in the dataset.
Parameters
----------
dataframes: List of pandas.DataFrame of the trip segments and the
["mode","notes","scripted","token", "trip_id"] columns with length 3 - has to contain dataframe
for columns "x", "y" and "z".
Returns
-------
result: returns a pandas.DataFrame where each each point in the distance matrix
is the distance of one trip segment to another one and each row of the
distance matrix corresponds to the trips segment distances to all other
trip segments. Additional columns are: "mode","notes","scripted","token",
where scripted is a binary variable where scripted=1 and ordinary=0
Note that the dimensionality of the result can be (for most cases)
different to the dimensionality of the incoming data pandas.DataFrame.
"""
categorical_colnames=["mode","notes","scripted","token", "trip_id"]
# Drop categorical column names for all dataframes.
small_dfs = [data.drop(categorical_colnames, axis=1) for data in dataframes]
# The new dataframe has dimensionality of nr_of_rows x nr_of_rows
nr_of_rows = small_dfs[0].shape[0]
column_names = ["distance_" + str(i) for i in range(nr_of_rows)]
result = pd.DataFrame(columns=column_names)
# Calculating distance matrix manually, since cdist(...) doesn't take 3D-arrays and I don't know how to solve
# this more elegantly.
distance_matrix = np.zeros([nr_of_rows, nr_of_rows])
for i in range(0, nr_of_rows):
for j in range(i + 1, nr_of_rows):
distance_matrix[i, j] = np.sqrt(
(
(small_dfs[0].iloc[i] - small_dfs[0].iloc[j]) ** 2 +
(small_dfs[1].iloc[i] - small_dfs[1].iloc[j]) ** 2 +
(small_dfs[2].iloc[i] - small_dfs[2].iloc[j]) ** 2
).sum()
)
distance_matrix[j, i] = distance_matrix[i, j]
result = pd.concat([result, pd.DataFrame(distance_matrix, columns=column_names)])
# Reappend the categorical columns
for colname in categorical_colnames:
result[colname] = dataframes[0][colname]
return result
@staticmethod
def _calculate_distance_with_dtw(data, norm: int = 1):
"""
Calculates metric for specified dataframe using dynamic time warping utilizing norm.
:param data:
:param norm: Defines which L-norm is to be used.
:return result: A 2D-nd-array containing distance from each segment to each other segment (same as with scipy's
cdist() - zeros(shape, dtype=float, order='C'))
"""
# Initialize empty distance matrix.
dist_matrix = np.zeros((data.shape[0], data.shape[0]), dtype=float)
# Note regarding multithreading: Splitting up by rows leads to imbalance amongst thread workloads.
# Instead, we split up all possible pairings to ensure even workloads and collect the results (and assemble
# the distance matrix) after the threads finished their calculations.
# Generate all pairings.
segment_pairings = [(i, j) for i in range(0, data.shape[0]) for j in range(0, data.shape[0]) if j > i]
# Set up multithreading. Run as many threads as logical cores are available on this machine - 1.
num_threads = psutil.cpu_count(logical=True)
threads = []
for i in range(0, num_threads):
# Calculate distance with fastDTW between each pairing of segments. Distances between elements to themselves
# are ignored and hence retain their intial value of 0.
thread = DTWThread(thread_id=i,
num_threads=num_threads,
segment_pairings=segment_pairings,
distance_matrix=dist_matrix,
data_to_process=data,
norm=norm)
threads.append(thread)
thread.start()
# Wait for threads to finish.
for thread in threads:
thread.join()
return dist_matrix
@staticmethod
def _cut_trip(sensor_data, target_column_name: str, snippet_length=30, column_names=None):
"""
Helper function to cut one trip into segments of snippet_length
and return the new pandas.DataFrame that includes the "total"
of each value.
:param target_column_name: Name of column to use as observation in trip (i. e. one of: "total", "x", "y", "z").
"""
HERTZ_RATE = 20
nr_of_trip_columns = HERTZ_RATE * snippet_length
categorical_colnames = ["mode","notes","scripted","token", "trip_id"]
if column_names is None:
column_names = ["snippet_"+str(i) for i in range(nr_of_trip_columns)]
column_names = column_names + categorical_colnames
result = pd.DataFrame(columns=column_names).drop(categorical_colnames, axis=1)
copied_sensor_data = sensor_data.reset_index(drop=True)
end_index = copied_sensor_data.index[-1]
# // floor division syntax
# the last segment wich is smaller than 30 seconds will be dropped
nr_of_rows = end_index // nr_of_trip_columns
start_index = 0
for row_index in range(nr_of_rows):
to_index = start_index + nr_of_trip_columns
row_i = copied_sensor_data.loc[start_index:to_index-1, target_column_name]
result.loc[row_index,:] = list(row_i)
start_index = to_index
return result
@staticmethod
def _get_row_entries_for_trip(trip, sensor_type="acceleration"):
"""
Helper function which splits on trip into the four parts
sensor_data, mode, notes and scripted, where scripted is
a binary variable where scripted=1 and ordinary=0
"""
sensor_data, mode, notes, scripted = None, None, None, None
for table_name, table_content in trip.items():
if table_name == "sensor":
sensor_data = table_content[table_content["sensor"] == sensor_type]
if table_name == "annotation":
annotation_data = table_content
mode = annotation_data["mode"][0]
notes = annotation_data["notes"][0]
if "scripted" in str(notes).lower():
scripted = 1
else:
scripted = 0
return sensor_data, mode, notes, scripted
@staticmethod
def unpack_all_trips(dfs: dict, keep_tokens=False):
"""
Helper method that takes a dictionary of the trips per token and returns a list
of all trips. Assumed nested structure is:
dict[token] = list of trips per token
:param keep_tokens: bool, default=False,
if True, the token is appended to the annotation dataframe.
This makes it easier to identify the trips later.
"""
result = []
dfs_copy = deepcopy(dfs)
for token, trips in sorted(dfs_copy.items()):
if keep_tokens:
for trip_i in trips:
if trip_i["annotation"] is not None:
trip_i["annotation"]["token"]=token
result += trips
return result
@staticmethod
def restore_preprocessed_data_from_disk(filename: str):
"""
Loads pickled object from disk.
:param filename: File name/relative path in /data/preprocessed.
:return: Dictionary holding data for tokens (same format as returned by Preprocessor.preprocess().
"""
data_dir = DatasetDownloader.get_data_dir()
full_path = os.path.join(data_dir, "preprocessed", filename)
with open(full_path, "rb") as file:
preprocessed_data = file.read()
return pickle.loads(preprocessed_data)
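# Illustrative usage sketch: the file name below is an assumption; any pickle previously
# written to data/preprocessed can be restored this way.
#   dfs = Preprocessor.restore_preprocessed_data_from_disk("preprocessed_data.pkl")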
@staticmethod
def _filter_nan_values(dataframes: list, properties_to_check: list, allowed_nan_ratio: float = 0.2):
"""
Filter NAN values from dataframes sensor data. Note that dataframe is dismissed if at least (!) one of the
specified columns exceeds the allowed ratio of NANs.
:param dataframes:
:param properties_to_check: Properties to check for NAN values (e.g.: "sensor", "location").
:param allowed_nan_ratio: Dataframe is removed if ratio (0 to 1) of NAN values relative to total count
exceeds defined threshold.
:return:
"""
filtered_dataframes = []
for i, df in enumerate(dataframes):
# Check if threshold was reached for one of the specified columns.
threshold_reached = True if np.count_nonzero(
[
df[prop].isnull().sum().sum() / float(len(df[prop])) > allowed_nan_ratio
for prop in properties_to_check
]
) > 0 else False
# Dismiss dataframe if share of NAN values is above defined_threshold.
if not threshold_reached:
# Drop rows with NANs.
for key in properties_to_check:
df[key].dropna(axis=0, how='any', inplace=True)
# Append to list.
filtered_dataframes.append(df)
return filtered_dataframes
@staticmethod
def _recalculate_accerelometer_2norm(resampled_dataframes):
"""
Recalculates the 2-norm for the x-/y-/z-values in the accelerometer data.
Note that the original column 'total' is overwritten with the new value.
:param resampled_dataframes:
:return: List of dataframes with updated values for column 'total'.
"""
for i, df in enumerate(resampled_dataframes):
# Stack the x-/y-/z-values of all entries in the current dataframe and apply the 2-norm to the resulting
# (3, n)-shaped array.
resampled_dataframes[i]["total"] = np.linalg.norm(
np.array([df["x"], df["y"], df["z"]]),
ord=2, axis=0
)
return resampled_dataframes
@staticmethod
def _cut_off_start_and_end_in_dataframes(dataframes, list_of_dataframe_names_to_cut, cutoff_in_seconds=30):
"""
Auxiliary method with boilerplate code for cutting off start and end of timeseries in specified list of
dataframes.
:param dataframes:
:param list_of_dataframe_names_to_cut:
:param cutoff_in_seconds:
:return: List of cleaned/cut dataframes.
"""
trips = {"scripted": {"TRAM": 0, "METRO": 0, "WALK": 0}, "unscripted": {"TRAM": 0, "METRO": 0, "WALK": 0}}
for i, df in enumerate(dataframes):
# Assuming "notes" only has one entry per trip and scripted trips' notes contain the word "scripted",
# while ordinary trips' notes don't.
if "scripted" in str(df["annotation"]["notes"][0]).lower():
trips["scripted"][df["annotation"]["mode"][0]] += 1
for dataframe_name in list_of_dataframe_names_to_cut:
# Cut off time series data.
dataframes[i][dataframe_name] = Preprocessor._cut_off_start_and_end_in_dataframe(
dataframe=df[dataframe_name], cutoff_in_seconds=cutoff_in_seconds
)
else:
trips["unscripted"][df["annotation"]["mode"][0]] += 1
return dataframes
@staticmethod
def _cut_off_start_and_end_in_dataframe(dataframe, cutoff_in_seconds=30):
"""
Removes entries with first and last cutoff_in_seconds in series.
Assumes time in dataframe is specified in milliseconds.
:param dataframe: Dataframe containing time series data. Expects specified dataframe to have a column "time".
:param cutoff_in_seconds:
:return: Cleaned dataframe.
"""
# Only cut if enough values exist. If not (e. g. "location" data not available) return None.
if not dataframe.empty:
# Calculate time thresholds.
lower_time_threshold = dataframe.head(1)["time"].values[0]
upper_time_threshold = dataframe.tail(1)["time"].values[0]
# Assuming time is specified as UTC timestamp in milliseconds, so let's convert the cutoff to milliseconds.
cutoff_in_seconds *= 1000
# Drop all rows with a time value less than 30 seconds after the initial entry and less than 30 seconds
# before the last entry.
dataframe = dataframe[
(dataframe["time"] >= lower_time_threshold + cutoff_in_seconds) &
(dataframe["time"] <= upper_time_threshold - cutoff_in_seconds)
]
return dataframe
else:
return None
@staticmethod
def _resample_trip_time_series(dataframes):
"""
Resamples the trips' sensor time series to a hard-coded rate of one observation per second.
Returns list of dataframes with resampled time series.
:param dataframes: List of dataframes with trip information.
:return: List of resampled time series.
"""
return [
Preprocessor.downsample_time_series_per_category(df["sensor"], categorical_colnames=["sensor"])
for df in dataframes
]
@staticmethod
def _remove_dataframes_without_annotation(dataframes):
"""
Removes dataframes without annotation data (since we don't know the transport mode and hence can't use them for
training).
:param dataframes: List of dataframes with trip data.
:return:
"""
filtered_dataframes = []
for df in dataframes:
if ("annotation" in df.keys()) and (not df["annotation"].empty):
filtered_dataframes.append(df)
return filtered_dataframes
@staticmethod
def _remove_dataframes_by_duration_limit(dataframes, min_duration=0, max_duration=sys.maxsize):
"""
Removes dataframes outside the defined time thresholds.
:param dataframes: List of dataframes with trip data.
:param min_duration: Minimum duration in seconds.
:param max_duration: Maximum duration in seconds.
:return:
"""
# Fetch summaries for all trips.
trip_summaries = Preprocessor.get_trip_summaries(dataframes, convert_time=True)
filtered_dataframes = []
for i, df in enumerate(dataframes):
trip_length = trip_summaries.iloc[i]["trip_length"].total_seconds()
if min_duration <= trip_length <= max_duration:
filtered_dataframes.append(df)
return filtered_dataframes
@staticmethod
def _convert_timestamps_from_dataframe(df, unit="ms", time_col_names=["time", "gpstime"]):
"""
This method converts integer timestamp columns in a pandas.DataFrame object
to datetime objects.
DataFrames in mobility data with datetime columns:
cell, event, location, marker, sensor
Parameters
----------
df: the input pandas DataFrame.
unit: string, default="ms"
time unit for transformation of the input integer timestamps.
Possible values: D,s,ms,us,ns
see "unit" at: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html
for further information.
Returns
-------
result: returns a deepcopy of the data with transformed time columns.
"""
result = pd.DataFrame()
if df is not None:
df_column_names = list(df.columns.values)
if any(name_i in time_col_names for name_i in df_column_names):
result = deepcopy(df)
for time_column_name in time_col_names:
if time_column_name in df_column_names:
index_copy = result.index
result.set_index(time_column_name,inplace=True)
result.index = pd.to_datetime(result.index, unit=unit)
result.reset_index(inplace=True)
result.index = index_copy
return result
@staticmethod
def _convert_timestamps_from_dictionary_of_dataframes(d, unit="ms", time_col_names=["time","gpstime"]):
""" Convenience function to loop over the dictionary of one track recording.
"""
result = dict()
for df_name, df in d.items():
result[df_name] = Preprocessor._convert_timestamps_from_dataframe(df,unit=unit, time_col_names=time_col_names)
return result
@staticmethod
def _convert_timestamps_from_list_of_total_trips(all_trips, unit="ms", time_col_names=["time","gpstime"]):
""" Convenience function to loop over the list of all track recordings.
"""
result = []
for i, trip_i in enumerate(all_trips):
result.append(Preprocessor._convert_timestamps_from_dictionary_of_dataframes(trip_i, unit=unit, time_col_names=time_col_names))
return result
@staticmethod
def convert_timestamps(data, unit="ms", time_col_names=["time","gpstime"]):
"""
This function converts the integer timestamps in the specified columns to
datetime objects in the format YYYY-MM-DD HH-MM-SS-uu-.., where uu stands for
the specified unit.
It is assumed that the time colums are integer as it is the case for the mobility data.
Accepted input types are pandas.DataFrame, dict, list which follow the convention
of the project's nesting structure. As a special case, if data is of type pandas.DataFrame
the behaviour of this function equals _convert_timestamps_from_dataframe.
Parameters
----------
data: input data, can be a list of all tracks, a dict of one track or a
pandas DataFrame of one table.
unit: string, default="ms"
time unit for transformation of the input integer timestamps.
Possible values: D,s,ms,us,ns
see "unit" at: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html
for further information.
time_col_names: list of strings, default=["time","gpstime"]
names of the time columns in the table which should be transformed.
Returns
-------
result: returns a deepcopy of the data with transformed time columns.
The datatype of data will be the same as of the input type. Accepted input types are
pandas.DataFrame, dict, list.
"""
result = pd.DataFrame()
if type(data) is pd.DataFrame:
result = Preprocessor._convert_timestamps_from_dataframe(data, unit, time_col_names)
elif type(data) is dict:
result = Preprocessor._convert_timestamps_from_dictionary_of_dataframes(data, unit, time_col_names)
elif type(data) is list:
result = Preprocessor._convert_timestamps_from_list_of_total_trips(data, unit, time_col_names)
return result
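# Illustrative usage sketch for the three accepted input types, assuming `trips` is the
# list returned by Preprocessor.get_data_per_trip():
#   one_table = Preprocessor.convert_timestamps(trips[0]["sensor"])  # pandas.DataFrame
#   one_trip  = Preprocessor.convert_timestamps(trips[0])            # dict of DataFrames
#   all_trips = Preprocessor.convert_timestamps(trips)               # list of trip dicts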
@staticmethod
def downsample_time_series(series, time_interval="S", time_col_name="time"):
"""
Downsamples a pandas time series DataFrame from milliseconds to a new
user specified time interval. The aggregation for the new time bins will be
calculated via the mean. To make sure that the right time column is
used you have to set the time column's name in time_col_name or set it as
index before calling this function.
Otherwise it is assumed that the time column has the name time_col_name="time".
For further information about examples for pandas resampling function see:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling
https://machinelearningmastery.com/resample-interpolate-time-series-data-python/
Parameters
----------
series: a pandas DataFrame object with a DatetimeIndex, if there is no
DatetimeIndex set, it is assumed that there is a Datetime column with
name time_col_name="time"
time_interval: string, default="S",
specifies the new time interval to which the series will be downsampled.
Valid values are "S" for seconds, "T" for minutes etc. It is also possible
to sample in a special interval e.g. 5 seconds, by passing "5S".
For all possible frequencies see:
https://stackoverflow.com/questions/17001389/pandas-resample-documentation#17001474
time_col_name: string, default="time"
The name of the time column.
Returns
-------
data: returns the data with downsampled time columns, where each new bin
is aggregated via the mean.
"""
if isinstance(series.index, pd.DatetimeIndex):
resampled = series.resample(time_interval).mean()
elif time_col_name in list(series.columns.values):
# In case the column has not been converted to Datetime object
# it will be converted here.
if series[time_col_name].dtype in [np.dtype("Int64")]:
series = deepcopy(series)
series = Preprocessor.convert_timestamps(series, time_col_names=[time_col_name])
resampled = series.set_index(time_col_name).resample(time_interval).mean()
resampled = resampled.reset_index()
else:
resampled = series
return resampled
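# Illustrative usage sketch: `sensor_df` is an assumed table with millisecond timestamps
# in a "time" column; this aggregates it into 5-second bins via the mean.
#   resampled = Preprocessor.downsample_time_series(sensor_df, time_interval="5S", time_col_name="time")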
@staticmethod
def downsample_time_series_per_category(series, categorical_colnames, time_interval="S", time_col_name="time"):
"""
Downsamples a pandas time series DataFrame from milliseconds to a new
user specified time interval and takes care of the right interpolation of categorical variables.
The aggregation for the new time bins will be calculated via the mean.
To make sure that the right time column is used you have to set the time
column's name in time_col_name.
Otherwise it is assumed that the time column has the name time_col_name="time".
For further information about examples for pandas resampling function see:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling
https://machinelearningmastery.com/resample-interpolate-time-series-data-python/
Parameters
----------
series: a pandas DataFrame object with a DatetimeIndex, if there is no
DatetimeIndex set, it is assumed that there is a Datetime column with
name time_col_name="time"
categorical_colnames: a list of strings of column names
e.g. ["sensor"]
time_interval: string, default="S",
specifies the new time interval to which the series will be downsampled.
Valid values are "S" for seconds, "T" for minutes etc. It is also possible
to sample in a special interval e.g. 5 seconds, by passing "5S".
For all possible frequencies see:
https://stackoverflow.com/questions/17001389/pandas-resample-documentation#17001474
time_col_name: string, default="time"
The name of the time column. Set to "index" if you want to transform
the index column.
Returns
-------
data: returns the data with downsampled time columns, where each new bin
is aggregated via the mean and keeps the categorical values.
"""
copied_series = deepcopy(series)
series_column_names = list(copied_series.columns.values)
result = pd.DataFrame(columns = series_column_names)
# In case the column or index has not been converted to Datetime object
# it will be converted here.
if (time_col_name=="index") and (copied_series.index.dtype in [np.dtype("Int64")]):
copied_series.index = pd.to_datetime(copied_series.index, unit="ms")
if time_col_name in series_column_names:
if copied_series[time_col_name].dtype in [np.dtype("Int64")]:
copied_series = Preprocessor._convert_timestamps_from_dataframe(copied_series, time_col_names=[time_col_name])
# Start actual downsampling
if isinstance(copied_series.index, pd.DatetimeIndex) or (time_col_name in series_column_names):
for categorical_colname_i in categorical_colnames:
categories = list(copied_series[categorical_colname_i].unique())
for category_i in categories:
series_for_category = copied_series[copied_series[categorical_colname_i]==category_i]
resampled = Preprocessor.downsample_time_series(series_for_category, time_interval, time_col_name)
resampled[categorical_colname_i] = category_i
result = pd.concat([result, resampled])
if isinstance(result.index, pd.DatetimeIndex):
result = result.sort_index()
else:
result = result.set_index(time_col_name).sort_index()
# need to reset index otherwise indices could be not unique anymore
result = result.reset_index()
else:
result = copied_series
return result
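# Illustrative usage sketch: downsample an assumed trip's sensor table to 1-second bins
# while resampling each sensor category separately.
#   resampled = Preprocessor.downsample_time_series_per_category(trip["sensor"], categorical_colnames=["sensor"])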
@staticmethod
def get_trip_summaries(all_trips, convert_time=False):
"""
This method returns a summary of all recorded trips. The summary includes start,
stop time, trip_length, recording mode and notes.
Parameters
----------
all_trips : a list of all trips
convert_time : bool, default=False
indicates whether or not the time values should be converted to datetime
objects.
Returns
-------
result : pandas DataFrame
a pandas dataframe with the summaries for each trip
"""
nr_of_recorded_trips = len(all_trips)
result = pd.DataFrame()
if convert_time:
all_trips_copy = Preprocessor.convert_timestamps(all_trips)
else:
all_trips_copy = all_trips
start_times = []
end_times = []
for index in range(0, nr_of_recorded_trips):
trip_i = all_trips_copy[index]
if ("annotation" in trip_i.keys()) and (not trip_i["annotation"].empty):
result = pd.concat([result, trip_i["annotation"]])
start_times.append(trip_i["marker"].iloc[0,0])
end_times.append(trip_i["marker"].iloc[-1,0])
result["Start"] = start_times
result["Stop"] = end_times
result["trip_length"] = [end-start for end,start in zip(end_times,start_times)]
result = result.reset_index(drop=True)
return result
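# Illustrative usage sketch, assuming `all_trips` was produced by Preprocessor.get_data_per_trip():
#   summaries = Preprocessor.get_trip_summaries(all_trips, convert_time=True)
#   summaries[["mode", "notes", "Start", "Stop", "trip_length"]].head()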
@staticmethod
def extract_csv_file_name(csv_name):
"""
Extracts the name from the csv file name e.g. annotation, cell, event, location,
mac, marker, sensor.
Parameters
----------
csv_name: full name of the csv file in tar.gz directory
Returns
-------
extracted_name: string,
"""
csv_name = str(csv_name)
extracted_name = ""
for name in DatasetDownloader.VALID_NAMES:
if name in csv_name:
extracted_name = name
return extracted_name
return extracted_name
@staticmethod
def read_tar_file_from_dir(file_path):
"""
This method reads a tar.gz file from a specified file path and appends each
.csv file to a dictionary where the key is specified as one of the VALID_NAMES:
["annotation", "cell", "event", "location", "mac", "marker", "sensor"], which
are the names given to identify the different collected mobility data.
"""
tar = tarfile.open(file_path, "r:gz")
csv_files_per_name = {}
for member in tar.getmembers():
f = tar.extractfile(member)
if f is not None:
name = Preprocessor.extract_csv_file_name(member.name)
csv_files_per_name[name] = pd.read_csv(f, header=0, sep=',', quotechar='"')
tar.close()
return csv_files_per_name
@staticmethod
def get_data_per_trip(dir_name="raw"):
"""
This method reads all downloaded data and returns a list of dictionaries
which include the pandas dataframes for each trip. Each trip DataFrame
can be accessed via its name e.g. annotation, cell, event, location,
mac, marker, sensor.
Parameters
-------
dir_name : string, default="raw",
specifies the name of the directory inside the data directory from which
the data should be read.
Returns
-------
data_frames : a list of pandas DataFrame's in a dictionary
"""
file_path = os.path.join(DatasetDownloader.get_data_dir(), dir_name)
tar_file_names = DatasetDownloader.get_file_names(file_path)
dfs = []
for tar_name in tar_file_names:
path_to_tar_file = os.path.join(file_path, tar_name)
csv_files_per_name = Preprocessor.read_tar_file_from_dir(path_to_tar_file)
dfs.append(csv_files_per_name)
return dfs
@staticmethod
def get_data_per_token(token):
"""
This method reads the downloaded data for one user and returns a list of dictionaries
which include the pandas dataframes for each trip. Each trip DataFrame
can be accessed via its name e.g. annotation, cell, event, location,
mac, marker, sensor.
Returns
-------
data_frames : a list of pandas DataFrame's in a dictionary
"""
file_path = os.path.join(DatasetDownloader.get_data_dir(), "raw")
tar_file_names = DatasetDownloader.get_file_names_for(file_path, token)
dfs = []
for tar_name in tar_file_names:
path_to_tar_file = os.path.join(file_path, tar_name)
csv_files_per_name = Preprocessor.read_tar_file_from_dir(path_to_tar_file)
dfs.append(csv_files_per_name)
return dfs
@staticmethod
def _get_shallow_copy(dfs: list):
""" Helper function to get a shallow copy of the list of dictionaries
as only sensor data is modified and the rest can be references.
"""
nr_of_trips = len(dfs)
result = [{} for trip in range(nr_of_trips)]
for trip_index, trip_i in enumerate(dfs):
for key, values in trip_i.items():
if key == "sensor":
result[trip_index][key] = None
else:
result[trip_index][key] = values
return result
@staticmethod
def calculate_paa(dfs, verbose=False):
newDict = Preprocessor._get_shallow_copy(dfs)
nr_of_trips = len(dfs)
for i in range(0, nr_of_trips):
if verbose:
print('Frame ', i)
#get single trip
sensor_trip = dfs[i]['sensor']
#get all sensors
sensor_set = set(sensor_trip['sensor'])
#create new data frame
helper = pd.DataFrame()
for sensor in sensor_set:
if verbose:
print("sensor: ", sensor)
sensor_data = sensor_trip[sensor_trip['sensor'] == sensor]
if verbose:
print('init time frame')
print(Preprocessor.convert_timestamps(sensor_data.head(1)))
print(Preprocessor.convert_timestamps(sensor_data.tail(1)))
sensor_data = sensor_data.drop(['sensor', 'total'], axis=1)
sensor_data.reset_index(drop=True,inplace=True)
sensor_data_approximated = Preprocessor.approx_sensor(sensor_data, 100)
start_index = 0
stop_index = 1
end_of_df = len(sensor_data_approximated)
buffer_helper = pd.DataFrame()
filler = pd.DataFrame()
if verbose:
print("end_of_df:", end_of_df)
while stop_index <= end_of_df:
if start_index + 30000 <= end_of_df:
stop_index = stop_index + 30000
else:
stop_index = end_of_df+1
buffer_helper = Preprocessor.normalize_trip(sensor_data_approximated.iloc[start_index:stop_index,:])
filler = pd.concat([filler, buffer_helper])
start_index = stop_index
filler['sensor'] = sensor
filler['total'] = np.linalg.norm(np.array([filler['x'], filler['y'], filler['z']]),ord=2, axis=0)
helper = pd.concat([helper,filler])
if verbose:
print("complete frame")
print(Preprocessor.convert_timestamps(helper.head(1))['time'])
print(Preprocessor.convert_timestamps(helper.tail(1))['time'])
print('----------------------------')
newDict[i]['sensor'] = helper
return Preprocessor.convert_timestamps(newDict)
@staticmethod
def approx_sensor(acc, hz=None, atd_ms=None):
"""
This method interpolates the observations at equidistant time stamps.
e.g. specifying hz=20 will result in a data frame containing 20 observations per second.
Returns
-------
df : a pandas DataFrame containing the interpolated series
"""
# interpolate to a common sampling rate
#
# acc ... data.table(time,x,y,z)
# atd_ms ... approximated time difference in milliseconds, default value = 10
if(hz is None and atd_ms is None):
atd_ms = 10
elif (hz is not None and atd_ms is None):
atd_ms = 1000/hz
elif (hz is not None and atd_ms is not None):
print("hz is overruled by atd_ms")
new_time = np.arange(acc['time'][0], acc['time'][len(acc['time'])-1], atd_ms)
f_ax = interp1d(acc['time'],acc['x'])
ax = list(f_ax(new_time))
f_ay = interp1d(acc['time'],acc['y'])
ay = list(f_ay(new_time))
f_az = interp1d(acc['time'],acc['z'])
az = list(f_az(new_time))
df = pd.DataFrame({
'time':new_time,
'x':ax,
'y':ay,
'z':az,
'total': np.linalg.norm(
np.array([ax, ay, az]),
ord=2, axis=0
)
})
return df
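# Illustrative usage sketch: interpolate an assumed accelerometer table `acc_df`
# (columns time, x, y, z) onto a 20 Hz grid, i.e. one observation every 50 ms.
#   acc_20hz = Preprocessor.approx_sensor(acc_df, hz=20)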
@staticmethod
def normalize_trip(trip):
"""
This method performs a Piecewise Aggregate Approximation (PAA) of a trip.
trip... the dataframe that should be transformed.
The PAA window size is fixed to 5 samples (see below).
REQUIREMENT: package 'future'
Returns
-------
df : a pandas DataFrame containing the approximated series
"""
paa = PAA(window_size=5, output_size=None, overlapping=False)
container = []
for label in trip.columns:
# this creates a new object; changing the dtype to float32 would increase speed
arr = np.array([trip[label]], dtype=np.float64)
transf = paa.transform(arr)
container.append(list(transf[0]))
df = pd.DataFrame(container,trip.columns).T
df['time'] = [int(i) for i in df['time']]
return df
# Note by rmitsch: Commented out since variable 'feature' is not defined. To remove?
# @staticmethod
# def plot_paa(sensor_data, w_size=5, seconds=2):
#
#
# plot_paa(feature, window_size=w_size,output_size=None,overlapping=False,marker='o')
@staticmethod
def print_start_and_end_of_recording_per_sensor(df):
set_of_sensors = set(df['sensor'])
for sensor in set_of_sensors:
print("sensor: ", sensor)
# get all sensor data for specific sensor
sensor_data = deepcopy(df[df["sensor"] == sensor])
sensor_data.reset_index(drop=True,inplace=True)
start = min(sensor_data['time'])
start = | pd.to_datetime(start, unit="ms") | pandas.to_datetime |
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from powersimdata.design.generation.clean_capacity_scaling import (
add_new_capacities_collaborative,
add_new_capacities_independent,
add_shortfall_to_targets,
)
def test_independent_new_capacity():
area_names = ["Pacific", "Atlantic", "Arctic", "Indian"]
# Atlantic tests a 'simple' case
# Pacific tests expected additional curtailment
# Arctic tests external additional clean energy
# Indian tests new capacity solar percentage
targets = pd.DataFrame(
{
"ce_target_fraction": [0.25, 0.3, 0.25, 0.25],
"allowed_resources": [
"solar,wind,geo",
"solar, wind, geo, hydro",
"solar,wind,geo",
"solar, wind, geo",
],
"demand": [2e8, 3e8, 2e8, 2e8],
"solar_percentage": [None, None, None, 0.75],
"external_ce_addl_historical_amount": [0, 0, 1.4e7, 0],
"geo.prev_capacity": [4000, 4500, 4000, 4000],
"geo.prev_cap_factor": [1, 1, 1, 1],
"geo.prev_generation": [8e6, 8.5e6, 8e6, 8e6],
"hydro.prev_capacity": [3900, 4400, 3900, 3900],
"hydro.prev_cap_factor": [1, 1, 1, 1],
"hydro.prev_generation": [7e6, 7.5e6, 7e6, 7e6],
"solar.prev_capacity": [3700, 4200, 3700, 3700],
"solar.prev_cap_factor": [0.25, 0.3, 0.215379, 0.215379],
"solar.prev_generation": [8.1252e6, 1.106784e7, 7e6, 7e6],
"wind.prev_capacity": [3600, 4100, 3600, 3600],
"wind.prev_cap_factor": [0.4, 0.35, 0.347854, 0.347854],
"wind.prev_generation": [1.264896e7, 1.260504e7, 1.1e7, 1.1e7],
},
index=area_names,
)
addl_curtailment = pd.DataFrame(
{
"geo": [0, 0, 0, 0],
"hydro": [0, 0, 0, 0],
"solar": [0.4, 0, 0, 0],
"wind": [0, 0, 0, 0],
},
index=area_names,
)
expected_return = pd.DataFrame(
{
"solar.next_capacity": [
(3700 + 4481.582),
(4200 + 8928.948),
(3700 + 2055.556),
(3700 + 8246.260),
],
"wind.next_capacity": [
(3600 + 4360.459),
(4100 + 8716.354),
(3600 + 2000),
(3600 + 2748.753),
],
"prev_ce_generation": [2.877416e7, 3.967288e7, 2.6e7, 2.6e7],
"ce_shortfall": [2.122584e7, 5.032712e7, 1e7, 2.4e7],
},
index=area_names,
)
targets = add_shortfall_to_targets(targets)
targets = add_new_capacities_independent(
targets, scenario_length=8784, addl_curtailment=addl_curtailment
)
test_columns = [
"prev_ce_generation",
"ce_shortfall",
"solar.next_capacity",
"wind.next_capacity",
]
| assert_frame_equal(targets[test_columns], expected_return[test_columns]) | pandas.testing.assert_frame_equal |
import glob
import os
import tarfile
import luigi
import pandas as pd
import wget
from research_user_interest.utils.template import GokartTask
class GetTextfileTask(GokartTask):
textfile_url: str = luigi.Parameter()
def run(self):
wget.download(self.textfile_url)
self.dump("get text file task")
class ExtractTextfileTask(GokartTask):
tarfile_path: str = luigi.Parameter()
output_path: str = luigi.Parameter()
def requires(self):
return GetTextfileTask()
def run(self):
# Extract the tar.gz archive
tar = tarfile.open(self.tarfile_path, "r:gz")
tar.extractall(self.output_path)
tar.close()
self.dump("extract textfile task")
class ExtractMainTextTask(GokartTask):
textfile_path: str = luigi.Parameter()
def requires(self):
return ExtractTextfileTask()
def extract_categories(self):
# Keep only the category directories
categories = [
name
for name in os.listdir(self.textfile_path)
if os.path.isdir(f"{self.textfile_path}/{name}")
]
return categories
def extract_text(self, file_name):
with open(file_name) as text_file:
# 今回はタイトル行は外したいので、3要素目以降の本文のみ使用
title_text = text_file.readlines()[2:]
title = title_text[0].strip()
text = title_text[1:]
# Preprocess the title
title = title.translate(
str.maketrans({"\n": "", "\t": "", "\r": "", "\u3000": ""})
)
# The body may span multiple lines, so keep them in a list and join them later
text = [sentence.strip() for sentence in text] # remove whitespace characters (spaces, tabs, newlines)
text = list(filter(lambda line: line != "", text))
text = "".join(text)
text = text.translate(
str.maketrans({"\n": "", "\t": "", "\r": "", "\u3000": ""})
) # remove newlines, tabs, carriage returns and full-width spaces
return title, text
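# Illustrative note (assumed file layout, not verified against every corpus file):
# each article file is expected to look like
#   line 0: <article URL>
#   line 1: <timestamp>
#   line 2: <title>
#   line 3+: <body ...>
# which is why extract_text drops the first two lines and returns (title, joined body).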
def run(self):
# Append the preprocessed texts and their category labels to the lists
list_title = []
list_body = []
list_label = []
for cat in self.extract_categories():
text_files = glob.glob(f"{self.textfile_path}/{cat}/*.txt")
# Run the preprocessing (extract_text) to obtain the title and body
text_list = [
(self.extract_text(text_file)[0], self.extract_text(text_file)[1])
for text_file in text_files
]
title = [text[0] for text in text_list]
body = [text[1] for text in text_list]
label = [cat] * len(body) # create a list with one category label per document
list_title.extend(title)
list_body.extend(body)
list_label.extend(label)
title_body_label_list = list(zip(list_title, list_body, list_label))
self.dump(title_body_label_list)
class MakeTextLabelDataFrameTask(GokartTask):
def requires(self):
return ExtractMainTextTask()
def run(self):
title_body_label_list = self.load()
title = [text[0] for text in title_body_label_list]
body = [text[1] for text in title_body_label_list]
label = [text[2] for text in title_body_label_list]
df = | pd.DataFrame({"title": title, "body": body, "label": label}) | pandas.DataFrame |
#!/usr/bin/python3
#
# CS224W Fall 2019-2020
# @<NAME>, <NAME>, <NAME>
#
import datetime as datetime
import numpy as np
import os
import pandas as pd
import pathlib
# Where files listed in DATASETS are located.
DATASET_DIR = '/shared/data'
DATASETS = {
'bad': [
'iran_201906_1_tweets_csv_hashed.csv',
],
'benign': [
'json/democratic_party_timelines',
'json/republican_party_timelines',
]
}
# Where processed datasets will be located.
PROCESSED_DATA_DIR = './datasets/compiled'
# ==============================================================================
# Utilities parsing and processing dataframe values
# ==============================================================================
def pd_float_to_int(float_or_nan):
"""
@params [float, possibly nan]
@return [integer, never nan]
Casts the float as an integer, or 0 if it's nan.
"""
return 0 if pd.isnull(float_or_nan) else int(float_or_nan)
def pd_str_to_list(str_or_nan):
"""
@params [a string representing a list or nan]
@returns [a list]
Interprets a string or nan as a list. Empty strings = empty lists.
"""
if pd.notnull(str_or_nan):
return [] if str_or_nan == '' or str_or_nan == '[]' else str_or_nan[1:-1].split(',')
else:
return []
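# Illustrative examples of the parsing behaviour (hypothetical inputs):
#   pd_str_to_list("[123, 456]")   # -> ['123', ' 456']  (elements stay strings)
#   pd_str_to_list("")             # -> []
#   pd_str_to_list(float("nan"))   # -> []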
def reformat_datetime(datetime_str, out_format):
"""
@params [a UTC datetime string with a specific format (see below)]
@returns: [a date string in the format of out_format]
Reformats a UTC datetime string returned by the Twitter API for compatibility.
"""
in_format = "%a %b %d %H:%M:%S %z %Y"
parsed_datetime = datetime.datetime.strptime(datetime_str, in_format)
return datetime.datetime.strftime(parsed_datetime, out_format)
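# Illustrative example with a hypothetical Twitter-style timestamp:
#   reformat_datetime("Wed Oct 10 20:19:24 +0000 2018", "%Y-%m-%d")  # -> '2018-10-10'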
# ==============================================================================
# Dataset code
# ==============================================================================
def load_datasets():
"""
@params: []
@returns: (Pandas Dataframe)
Reads all csv's from the 'bad' partition in DATASETS, and
concatenates these to a single pandas dataframe. Returns the dataframe.
"""
li = []
for dataset in DATASETS['bad']:
path = os.path.join(DATASET_DIR, dataset)
print('Reading data from %s' % path)
df = pd.read_csv(path)
df = format_csv_df(df)
li.append(df)
return pd.concat(li, axis=0, ignore_index=True)
def load_json():
"""
@params: []
@returns: (Pandas Dataframe)
Reads all json's from the 'benign' partition in DATASETS, and
concatenates these to a single pandas dataframe. Returns the dataframe.
"""
li = []
for dataset in DATASETS['benign']:
path = os.path.join(DATASET_DIR, dataset)
print('Reading data from %s' % path)
df = pd.read_json(path, lines=True)
df = convert_to_csv_df(df)
li.append(df)
return | pd.concat(li, axis=0, ignore_index=True) | pandas.concat |
__author__ = 'saeedamen'
#
# Copyright 2015 Thalesians Ltd. - http://www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
plotly_examples
Shows how to plot using Plotly library (uses Jorge Santos' Cufflinks wrapper)
"""
import datetime
from chartesians.graphs.graphproperties import GraphProperties
from chartesians.graphs.plotfactory import PlotFactory
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.timeseries.calcs.timeseriescalcs import TimeSeriesCalcs
if True:
import pandas
df = | pandas.read_csv("volsurface.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
"""
This file is part of MADIP: Molecular Atlas Data Integration Pipeline
This module loads the data for step_1_collect_protein_data.ipynb jupyter notebook.
Age in days is only approximate and not involved in the downstream analysis. Qualitative age category is defined based on the original data sources.
Copyright 2021 Blue Brain Project / EPFL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
def get_hamezah_2019_dataframe():
"""
Return pandas dataframe for Hamezah 2019
:return:
pandas.core.frame.DataFrame: dataframe containing Hamezah 2019 data.
"""
print("Importing Hamezah 2019 pandas dataframe.")
# NEWDATA 1. Hamezah 2019 (Mice Hippocampus, Medial Prefrontal Cortex, and Striatum)
hamezah_2019_f = pd.ExcelFile('../data/source_data/Hameezah_2019_formatted.xlsx')
hamezah_2019_hippocampus = hamezah_2019_f.parse("Hippocampus", index_col=None)
hamezah_2019_pfc = hamezah_2019_f.parse("MedialPrefrontalCortex", index_col=None)
hamezah_2019_striatum = hamezah_2019_f.parse("Striatum", index_col=None)
hamezah_2019_hippocampus['location'] = 'hippocampus'
hamezah_2019_pfc['location'] = 'cortex' # prefrontal cortex
hamezah_2019_striatum['location'] = 'striatum'
hamezah_2019 = pd.concat([hamezah_2019_hippocampus, hamezah_2019_pfc, hamezah_2019_striatum])
hamezah_2019['Gene names'] = hamezah_2019['Gene names'].str.upper()
hamezah_2019['Calc: LFQ intensity WT'] = 2 ** hamezah_2019['Average LFQ intensity (Log2) WT-Ctrl']
hamezah_2019['Calc: LFQ intensity Alzheimer Tg'] = 2 ** hamezah_2019['Average LFQ intensity (Log2) Tg-ctrl'] #
hamezah_2019 = hamezah_2019.reset_index(drop=True)
hamezah_2019['Gene names'] = hamezah_2019['Gene names'].str.upper()
hamezah_2019 = hamezah_2019.drop(columns=['Protein names', 'Average LFQ intensity (Log2) WT-Ctrl',
'Average LFQ intensity (Log2) Tg-ctrl',
'Number of\npeptides',
'Maxquant\nscore', 'MS/MS\ncount', 'p-value', 'q-value'
])
hamezah_2019 = hamezah_2019.rename(columns={'Protein\naccession': 'Uniprot',
'Gene names': 'gene_names',
'Molecular\nweight (kDa)': 'molecular_weight_kDa',
'Calc: LFQ intensity WT': 'LFQintensity_WT',
'Calc: LFQ intensity Alzheimer Tg': 'LFQintensity_Alzheimer'
})
hamezah_2019_df = pd.wide_to_long(hamezah_2019, stubnames='LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa', 'location'],
j='condition', sep='_', suffix=r'\w+')
hamezah_2019_df = hamezah_2019_df.reset_index()
hamezah_2019_df['Study'] = 'Hamezah 2019'
hamezah_2019_df['Organism'] = 'mouse'
hamezah_2019_df['Age_days'] = 365 + 3 * 30 + 21
# Five-month-old mice ... for a duration of 10 months -> 15 months
hamezah_2019_df['raw_data_units'] = 'LFQintensity'
hamezah_2019_df = hamezah_2019_df.rename(columns={'LFQintensity': 'raw_data'})
return hamezah_2019_df
def get_hamezah_2018_dataframe():
"""
Return pandas dataframe for Hamezah 2018
:return:
pandas.core.frame.DataFrame: dataframe containing Hamezah 2018 data
"""
# ### Hamezah_2018
print("Importing Hamezah 2018 pandas dataframe.")
hamezah_2018f = pd.ExcelFile('../data/source_data/1-s2.0-S0531556518303097-mmc2.xlsx')
hamezah_2018_hippocampus = hamezah_2018f.parse('Sheet1')
hamezah_2018_pfc = hamezah_2018f.parse('Sheet2') # medial prefrontal cortex
hamezah_2018_striatum = hamezah_2018f.parse('Sheet3')
hamezah_2018_hippocampus['location'] = 'hippocampus'
hamezah_2018_pfc['location'] = 'cortex' # prefrontal cortex
hamezah_2018_striatum['location'] = 'striatum'
hamezah_2018 = pd.concat([hamezah_2018_hippocampus, hamezah_2018_pfc, hamezah_2018_striatum])
hamezah_2018['Gene names'] = hamezah_2018['Gene names'].str.upper()
hamezah_2018['Calc: LFQ intensity 14 months'] = 2 ** hamezah_2018['Average LFQ intensity (Log2) 14 months']
hamezah_2018['Calc: LFQ intensity 18 months'] = 2 ** hamezah_2018['Average LFQ intensity (Log2) 18 months']
hamezah_2018['Calc: LFQ intensity 23 months'] = 2 ** hamezah_2018['Average LFQ intensity (Log2) 23 months']
hamezah_2018['Calc: LFQ intensity 27 months'] = 2 ** hamezah_2018['Average LFQ intensity (Log2) 27 months']
hamezah_2018 = hamezah_2018.reset_index(drop=True)
hamezah_2018['Gene names'] = hamezah_2018['Gene names'].str.upper()
hamezah_2018 = hamezah_2018.drop(columns=['Protein names', 'Average LFQ intensity (Log2) 14 months',
'Average LFQ intensity (Log2) 18 months',
'Average LFQ intensity (Log2) 23 months',
'Average LFQ intensity (Log2) 27 months',
'Number of\npeptides',
'Maxquant\nscore', 'MS/MS\ncount', 'ANOVA significant'
])
hamezah_2018 = hamezah_2018.rename(columns={'Protein\naccession': 'Uniprot',
'Gene names': 'gene_names',
'Molecular\nweight (kDa)': 'molecular_weight_kDa',
'Calc: LFQ intensity 14 months': 'LFQintensity_14months',
'Calc: LFQ intensity 18 months': 'LFQintensity_18months',
'Calc: LFQ intensity 23 months': 'LFQintensity_23months',
'Calc: LFQ intensity 27 months': 'LFQintensity_27months'
})
hamezah_2018_df = pd.wide_to_long(hamezah_2018, stubnames='LFQintensity',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa', 'location'],
j='sample_id', sep='_', suffix=r'\w+')
hamezah_2018_df = hamezah_2018_df.reset_index()
hamezah_2018_df['Study'] = 'Hamezah 2018'
hamezah_2018_df['Organism'] = 'rat'
hamezah_2018_df.loc[hamezah_2018_df['sample_id'] == '14months', 'Age_days'] = 365 + 2 * 30 + 21 # 446 # 365 + 2*30 + 21 # '14 months'
hamezah_2018_df.loc[hamezah_2018_df['sample_id'] == '18months', 'Age_days'] = 365 + 6 * 30 + 21 # 365 + 6*30 +21 # '18 months'
hamezah_2018_df.loc[hamezah_2018_df['sample_id'] == '23months', 'Age_days'] = 721 # 365*2 -30 +21 # '23 months'
hamezah_2018_df.loc[hamezah_2018_df['sample_id'] == '27months', 'Age_days'] = 841 # 365*2 + 30*3 +21 # '27 months'
hamezah_2018_df['raw_data_units'] = 'LFQintensity'
hamezah_2018_df = hamezah_2018_df.rename(columns={'LFQintensity': 'raw_data'})
return hamezah_2018_df
def get_chuang_2018_dataframe():
"""
Return pandas dataframe for Chuang 2018
:return:
pandas.core.frame.DataFrame: dataframe containing Chuang 2018 data.
"""
print("Importing Chuang 2018 pandas dataframe.")
chuang2018f = pd.ExcelFile('../data/source_data/Supporting File S-3_The lists of the proteins identified in the axon and whole-cell samples.xlsx')
chuang2018_axon = chuang2018f.parse('Axon samples, 2548', skiprows=3, index_col=None)
chuang2018_wholecell = chuang2018f.parse('Whole-cell samples, 2752', skiprows=3, index_col=None)
chuang2018_wholecell = chuang2018_wholecell.drop(
['Fasta headers', 'Number of proteins', 'Peptides axon', 'Peptides whole-cell',
'Razor + unique peptides axon', 'Razor + unique peptides whole-cell',
'Score', 'Sequence coverage axon [%]',
'Sequence coverage whole-cell [%]',
'Fasta headers.1', 'Number of proteins.1',
'Peptides axon.1', 'Peptides whole-cell.1',
'Razor + unique peptides axon.1',
'Razor + unique peptides whole-cell.1', 'Score.1',
'Sequence coverage axon [%].1', 'Sequence coverage whole-cell [%].1'], axis=1)
chuang2018_axon = chuang2018_axon.drop(['Fasta headers', 'Number of proteins', 'Peptides axon',
'Peptides whole-cell',
'Razor + unique peptides axon', 'Razor + unique peptides whole-cell',
'Score', 'Sequence coverage axon [%]',
'Sequence coverage whole-cell [%]',
'Fasta headers.1', 'Number of proteins.1',
'Peptides axon.1', 'Peptides whole-cell.1',
'Razor + unique peptides axon.1',
'Razor + unique peptides whole-cell.1', 'Score.1',
'Sequence coverage axon [%].1', 'Sequence coverage whole-cell [%].1'],
axis=1)
chuang2018_axon = chuang2018_axon.rename(columns={'GN': 'gene_names',
'Accession': 'Uniprot',
'Protein IDs': 'Experiment1:Protein IDs',
'Protein IDs.1': 'Experiment2:Protein IDs.1',
'iBAQ axon': 'iBAQ_Experiment1',
'iBAQ axon.1': 'iBAQ_Experiment2'})
chuang2018_wholecell = chuang2018_wholecell.rename(columns={'GN': 'gene_names',
'Accession': 'Uniprot',
'Protein IDs': 'Experiment1:Protein IDs',
'Protein IDs.1': 'Experiment2:Protein IDs.1',
'iBAQ whole-cell': 'iBAQ_Experiment1',
'iBAQ whole-cell.1': 'iBAQ_Experiment2'})
chuang2018_axon['gene_names'] = chuang2018_axon['gene_names'].str.upper()
chuang2018_wholecell['gene_names'] = chuang2018_wholecell['gene_names'].str.upper()
chuang2018_axon['location'] = 'axon'
chuang2018_wholecell['location'] = 'neurons' # 'neuron_whole_cell'
chuang2018 = pd.concat([chuang2018_axon, chuang2018_wholecell], sort=False)
chuang2018 = chuang2018.reset_index(drop=True)
chuang2018 = chuang2018.drop(['Description', 'Experiment1:Protein IDs', 'Experiment2:Protein IDs.1'], axis=1)
chuang2018 = chuang2018.rename(columns={'Mol. weight [kDa]': 'molecular_weight_kDa'})
chuang2018_df = pd.wide_to_long(chuang2018, stubnames='iBAQ',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa', 'location'],
j='sample_id', sep='_', suffix=r'\w+')
chuang2018_df = chuang2018_df.reset_index()
chuang2018_df['Study'] = 'Chuang 2018'
chuang2018_df['Organism'] = 'rat'
chuang2018_df['Age_days'] = 18 # E18
chuang2018_df['Age_cat'] = 'embr'
chuang2018_df['raw_data_units'] = 'iBAQ'
chuang2018_df = chuang2018_df.rename(columns={'iBAQ': 'raw_data'})
return chuang2018_df
def get_duda_2018_dataframe():
"""
Return pandas dataframe for Duda 2018
:return:
pandas.core.frame.DataFrame: dataframe containing Duda 2018 data.
"""
print("Importing Duda 2018 pandas dataframe.")
dudaf = pd.ExcelFile('../data/source_data/dataProt.xlsx')
duda_hippocampus = dudaf.parse('hippocampus')
duda_cerebellum = dudaf.parse('cerebellum')
duda_cortex = dudaf.parse('cortex')
# fill merged cells with the same values
# merged cells processed manually to avoid artefacts
# duda_hippocampus = duda_hippocampus.fillna(method='ffill')
# duda_cerebellum = duda_cerebellum.fillna(method='ffill')
# duda_cortex = duda_cortex.fillna(method='ffill')
duda_hippocampus['location'] = 'hippocampus'
duda_cerebellum['location'] = 'cerebellum'
duda_cortex['location'] = 'cortex'
duda = pd.concat([duda_hippocampus, duda_cerebellum, duda_cortex], sort=False)
duda = duda.reset_index(drop=True)
duda['gene_names'] = duda['gene_names'].str.upper()
# young = 1 month; old = 12 months
duda['duplicated'] = duda.duplicated(subset=['Young Mean concentration',
'Adult Mean concentration', 'location'], keep=False)
duda = duda.drop(columns='duplicated')
duda = duda.rename(columns={'Young Mean concentration': 'MeanConcentration_young',
'Adult Mean concentration': 'MeanConcentration_adult'})
duda_2018_df = pd.wide_to_long(duda, stubnames='MeanConcentration',
i=['gene_names', 'location'],
j='condition', sep='_', suffix=r'\w+')
duda_2018_df = duda_2018_df.reset_index()
duda_2018_df['Study'] = 'Duda 2018'
duda_2018_df['Organism'] = 'mouse'
duda_2018_df.loc[duda_2018_df['condition'] == 'young', 'Age_days'] = 51 # P30 = 21 embryonic days + 30 postnatal
duda_2018_df.loc[duda_2018_df['condition'] == 'adult', 'Age_days'] = 386 # 365 + 21 embryonic days #'12 months'
duda_2018_df['raw_data_units'] = 'Mean concentration [mol/(g total protein)]'
duda_2018_df = duda_2018_df.rename(columns={'MeanConcentration': 'raw_data'})
return duda_2018_df
def get_krogager_2018_dataframe():
"""
Return pandas dataframe for Krogager 2018
:return:
krogager_df :pandas.core.frame.DataFrame: dataframe containing Krogager 2018 data.
"""
print("Importing Krogager 2018 pandas dataframe.")
krogagerf = pd.ExcelFile('../data/source_data/MouseBrainProteomeKrogager2018_supp.xlsx')
krogager = krogagerf.parse('Sheet1')
krogager = krogager.drop(columns=['Significant (S0:1, FDR:0.05)', '-LOG(P-value)',
'Log2(SORT Output / Control Output)', 'Protein names',
'Intensity', 'MS/MS Count'])
# in this case we combine samples due to many NaN in individual samples
col1 = krogager.loc[:, ['Log2(LFQ) Control Output 1', 'Log2(LFQ) Control Output 2', 'Log2(LFQ) Control Output 3',
'Log2(LFQ) Control Output 4', 'Log2(LFQ) Control Output 5', 'Log2(LFQ) Control Output 6']]
krogager['Log2(LFQ) Control median'] = col1.median(axis=1)
col2 = krogager.loc[:, ['Log2(LFQ) SORT Output 1', 'Log2(LFQ) SORT Output 2', 'Log2(LFQ) SORT Output 3']]
krogager['Log2(LFQ) SORT median'] = col2.median(axis=1)
krogager['LFQintensity_control'] = 2 ** krogager['Log2(LFQ) Control median']
krogager['LFQintensity_SORT'] = 2 ** krogager['Log2(LFQ) SORT median']
krogager['Gene names'] = krogager['Gene names'].str.upper()
krogager = krogager.rename(columns={'Gene names': 'gene_names',
'Majority protein IDs': 'Uniprot'
})
krogager_drop = krogager.drop(['Log2(LFQ) Control Output 1',
'Log2(LFQ) Control Output 2', 'Log2(LFQ) Control Output 3',
'Log2(LFQ) Control Output 4', 'Log2(LFQ) Control Output 5',
'Log2(LFQ) Control Output 6', 'Log2(LFQ) SORT Output 1',
'Log2(LFQ) SORT Output 2', 'Log2(LFQ) SORT Output 3',
'Log2(LFQ) Control median', 'Log2(LFQ) SORT median'], axis=1)
krogager_df = pd.wide_to_long(krogager_drop, stubnames='LFQintensity',
i=['Uniprot', 'gene_names'],
j='condition', sep='_', suffix=r'\w+')
krogager_df = krogager_df.reset_index()
krogager_df['Study'] = 'Krogager 2018'
krogager_df['Organism'] = 'mouse'
krogager_df['Age_days'] = 13 * 7 + 21 # 13*7 +21 # 10 weeks + 2weeks after surgery + 1 week treatment
krogager_df.loc[krogager_df['condition'] == 'SORT', 'location'] = 'neurons' # striatum neurons
krogager_df.loc[krogager_df['condition'] == 'control', 'location'] = 'striatum' # striatum neurons
krogager_df['raw_data_units'] = 'LFQintensity'
krogager_df = krogager_df.rename(columns={'LFQintensity': 'raw_data'})
return krogager_df
def get_hosp_2017_dataframe():
"""
Return pandas dataframe for Hosp 2017
:return:
pandas.core.frame.DataFrame: dataframe containing Hosp 2017 data.
"""
print("Importing Hosp 2017 pandas dataframe. This can last a while.")
hosp_solf = pd.ExcelFile('../data/source_data/1-s2.0-S2211124717315772-mmc2.xlsx')
hosp_sol = hosp_solf.parse('S1A_soluble proteome')
hosp_sol2f = pd.ExcelFile('../data/source_data/1-s2.0-S2211124717315772-mmc3.xlsx')
hosp_sol2 = hosp_sol2f.parse('S2A_CSF_proteome')
hosp_insolf = pd.ExcelFile('../data/source_data/1-s2.0-S2211124717315772-mmc4.xlsx')
hosp_insol = hosp_insolf.parse('S3A_insolube_proteome_data')
hosp_sol = hosp_sol.drop(
['GOBP name', 'GOMF name', 'GOCC name', 'KEGG name', 'Pfam', 'GSEA', 'Keywords', 'Corum', 'Peptides',
'Razor + unique peptides', 'Razor + unique peptides', 'Sequence coverage [%]',
'Unique + razor sequence coverage [%]', 'Unique sequence coverage [%]', 'Q-value'], axis=1)
hosp_sol = hosp_sol[hosp_sol.columns.drop(list(hosp_sol.filter(regex=r'LFQ')))]
hosp_sol = hosp_sol[hosp_sol.columns.drop(list(hosp_sol.filter(regex=r'_R6\/2_')))]
hosp_sol2 = hosp_sol2.drop(
['GOBP name', 'GOMF name', 'GOCC name', 'KEGG name', 'Pfam', 'GSEA', 'Fasta headers', 'Corum', 'Peptides',
'Razor + unique peptides', 'Razor + unique peptides', 'Sequence coverage [%]',
'Unique + razor sequence coverage [%]', 'Unique sequence coverage [%]', 'Q-value'], axis=1)
hosp_sol2 = hosp_sol2[hosp_sol2.columns.drop(list(hosp_sol2.filter(regex=r'LFQ')))]
hosp_sol2 = hosp_sol2[hosp_sol2.columns.drop(list(hosp_sol2.filter(regex=r'_R6\/2_')))]
hosp_insol = hosp_insol.drop(
['GOBP name', 'GOMF name', 'GOCC name', 'KEGG name', 'Pfam', 'GSEA', 'Fasta headers', 'Corum',
'Coiled-coil domain',
'LCR motif', 'polyQ domain', 'coiled-coil length', 'LCR length', 'polyQ length', 'Peptides',
'Razor + unique peptides', 'Razor + unique peptides', 'Sequence coverage [%]',
'Unique + razor sequence coverage [%]', 'Unique sequence coverage [%]', 'Q-value'], axis=1)
hosp_insol = hosp_insol[hosp_insol.columns.drop(list(hosp_insol.filter(regex=r'LFQ')))]
hosp_insol = hosp_insol[hosp_insol.columns.drop(list(hosp_insol.filter(regex=r'_R6\/2_')))]
hosp_sol['Gene names'] = hosp_sol['Gene names'].str.upper()
hosp_sol2['Gene names'] = hosp_sol2['Gene names'].str.upper()
hosp_insol['Gene names'] = hosp_insol['Gene names'].str.upper()
###
hosp_sol = hosp_sol.rename(columns={'Gene names': 'gene_names',
'Majority protein IDs': 'Uniprot',
'Mol. weight [kDa]': 'molecular_weight_kDa'})
hosp_sol = hosp_sol.drop([
'Protein IDs', 'Protein names', 'Unique peptides', 'Intensity', 'MS/MS Count',
'iBAQ', 'iBAQ library'], axis=1)
hosp_sol2 = hosp_sol2.rename(columns={'Gene names': 'gene_names',
'Majority protein IDs': 'Uniprot',
'Mol. weight [kDa]': 'molecular_weight_kDa'})
hosp_sol2 = hosp_sol2.drop([
'Protein IDs', 'Protein names', 'Unique peptides', 'Score', 'Intensity', 'MS/MS Count',
'iBAQ_total'], axis=1)
hosp_insol = hosp_insol.rename(columns={'Gene names': 'gene_names',
'Majority protein IDs': 'Uniprot',
'Mol. weight [kDa]': 'molecular_weight_kDa'})
hosp_insol = hosp_insol.drop([
'Protein IDs', 'Protein names', 'Unique peptides', 'Score', 'Intensity', 'MS/MS Count',
'iBAQ'], axis=1)
hosp_sol.columns = ['Uniprot', 'gene_names', 'molecular_weight_kDa', 'iBAQ_5wWTce1', 'iBAQ_5wWTce2', 'iBAQ_5wWTce3',
'iBAQ_5wWTce4',
'iBAQ_5wWTco1', 'iBAQ_5wWTco2', 'iBAQ_5wWTco3',
'iBAQ_5wWTco4', 'iBAQ_5wWThc1', 'iBAQ_5wWThc2',
'iBAQ_5wWThc3', 'iBAQ_5wWThc4', 'iBAQ_5wWTst1',
'iBAQ_5wWTst2', 'iBAQ_5wWTst3', 'iBAQ_5wWTst4',
'iBAQ_8wWTce1', 'iBAQ_8wWTce2', 'iBAQ_8wWTce3',
'iBAQ_8wWTco1', 'iBAQ_8wWTco2', 'iBAQ_8wWTco3',
'iBAQ_8wWThc1', 'iBAQ_8wWThc2', 'iBAQ_8wWThc3',
'iBAQ_8wWTst1', 'iBAQ_8wWTst2', 'iBAQ_8wWTst3',
'iBAQ_12wWTce1', 'iBAQ_12wWTce2', 'iBAQ_12wWTce3',
'iBAQ_12wWTco1', 'iBAQ_12wWTco2', 'iBAQ_12wWTco3',
'iBAQ_12wWThc1', 'iBAQ_12wWThc2', 'iBAQ_12wWThc3',
'iBAQ_12wWTst1', 'iBAQ_12wWTst2', 'iBAQ_12wWTst3']
hosp_sol2.columns = ['Uniprot', 'gene_names', 'molecular_weight_kDa', 'iBAQ_5wWT1', 'iBAQ_5wWT2', 'iBAQ_5wWT3',
'iBAQ_8wWT1', 'iBAQ_8wWT2',
'iBAQ_8wWT3', 'iBAQ_12wWT1', 'iBAQ_12wWT2', 'iBAQ_12wWT3'
]
hosp_insol.columns = ['Uniprot', 'gene_names', 'molecular_weight_kDa',
'iBAQ_5wWTce1', 'iBAQ_5wWTce2', 'iBAQ_5wWTce3', 'iBAQ_5wWTce4',
'iBAQ_5wWTco1', 'iBAQ_5wWTco2', 'iBAQ_5wWTco3',
'iBAQ_5wWTco4', 'iBAQ_5wWThc1', 'iBAQ_5wWThc2',
'iBAQ_5wWThc3', 'iBAQ_5wWThc4', 'iBAQ_5wWTst1',
'iBAQ_5wWTst2', 'iBAQ_5wWTst3', 'iBAQ_5wWTst4',
'iBAQ_8wWTce1', 'iBAQ_8wWTce2', 'iBAQ_8wWTce3',
'iBAQ_8wWTco1', 'iBAQ_8wWTco2', 'iBAQ_8wWTco3',
'iBAQ_8wWThc1', 'iBAQ_8wWThc2', 'iBAQ_8wWThc3',
'iBAQ_8wWTst1', 'iBAQ_8wWTst2', 'iBAQ_8wWTst3',
'iBAQ_12wWTce1', 'iBAQ_12wWTce2', 'iBAQ_12wWTce3',
'iBAQ_12wWTco1', 'iBAQ_12wWTco2', 'iBAQ_12wWTco3',
'iBAQ_12wWThc1', 'iBAQ_12wWThc2', 'iBAQ_12wWThc3',
'iBAQ_12wWTst1', 'iBAQ_12wWTst2', 'iBAQ_12wWTst3',
'iBAQ_5wWTcex/yiBAQ_inc5wWTce',
'iBAQ_5wWTcox/yiBAQ_inc5wWTco',
'iBAQ_5wWThcx/yiBAQ_inc5wWThc',
'iBAQ_5wWTstx/yiBAQ_inc5wWTst',
'iBAQ_8wWTcex/yiBAQ_inc8wWTce',
'iBAQ_8wWTcox/yiBAQ_inc8wWTco',
'iBAQ_8wWThcx/yiBAQ_inc8wWThc',
'iBAQ_8wWTstx/yiBAQ_inc8wWTst',
'iBAQ_12wWTcex/yiBAQ_inc12wWTce',
'iBAQ_12wWTcox/yiBAQ_inc12wWTco',
'iBAQ_12wWThcx/yiBAQ_inc12wWThc',
'iBAQ_12wWTstx/yiBAQ_inc12wWTst']
hosp_insol = hosp_insol.drop(['iBAQ_5wWTcex/yiBAQ_inc5wWTce',
'iBAQ_5wWTcox/yiBAQ_inc5wWTco',
'iBAQ_5wWThcx/yiBAQ_inc5wWThc',
'iBAQ_5wWTstx/yiBAQ_inc5wWTst',
'iBAQ_8wWTcex/yiBAQ_inc8wWTce',
'iBAQ_8wWTcox/yiBAQ_inc8wWTco',
'iBAQ_8wWThcx/yiBAQ_inc8wWThc',
'iBAQ_8wWTstx/yiBAQ_inc8wWTst',
'iBAQ_12wWTcex/yiBAQ_inc12wWTce',
'iBAQ_12wWTcox/yiBAQ_inc12wWTco',
'iBAQ_12wWThcx/yiBAQ_inc12wWThc',
'iBAQ_12wWTstx/yiBAQ_inc12wWTst'], axis=1)
hosp_sol_df = pd.wide_to_long(hosp_sol, stubnames='iBAQ',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
hosp_sol_df = hosp_sol_df.reset_index()
hosp_sol_df['Study'] = 'Hosp 2017, soluble'
hosp_sol_df['Organism'] = 'mouse'
hosp_sol_df['raw_data_units'] = 'iBAQ'
hosp_sol_df = hosp_sol_df.rename(columns={'iBAQ': 'raw_data'})
hosp_sol2_df = pd.wide_to_long(hosp_sol2, stubnames='iBAQ',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
hosp_sol2_df = hosp_sol2_df.reset_index()
hosp_sol2_df['Study'] = 'Hosp 2017, CSF'
hosp_sol2_df['Organism'] = 'mouse'
hosp_sol2_df['raw_data_units'] = 'iBAQ'
hosp_sol2_df = hosp_sol2_df.rename(columns={'iBAQ': 'raw_data'})
hosp_insol_df = pd.wide_to_long(hosp_insol, stubnames='iBAQ',
i=['Uniprot', 'gene_names', 'molecular_weight_kDa'],
j='sample_id', sep='_', suffix=r'\w+')
hosp_insol_df = hosp_insol_df.reset_index()
hosp_insol_df['Study'] = 'Hosp 2017, insoluble'
hosp_insol_df['Organism'] = 'mouse'
hosp_insol_df['raw_data_units'] = 'iBAQ'
hosp_insol_df = hosp_insol_df.rename(columns={'iBAQ': 'raw_data'})
hosp_3 = pd.concat([hosp_sol_df, hosp_sol2_df, hosp_insol_df
], ignore_index=True, sort=False)
hosp_3.loc[hosp_3['sample_id'].isin(['5wWTce1', '5wWTce2', '5wWTce3', '5wWTce4', '5wWTco1', '5wWTco2',
'5wWTco3', '5wWTco4', '5wWThc1', '5wWThc2', '5wWThc3', '5wWThc4',
'5wWTst1', '5wWTst2', '5wWTst3', '5wWTst4', '5wWT1', '5wWT2',
'5wWT3']), 'Age_days'] = 35 + 21 # 35 +21 #'5 weeks'
hosp_3.loc[hosp_3['sample_id'].isin(['8wWTce1', '8wWTce2',
'8wWTce3', '8wWTco1', '8wWTco2', '8wWTco3', '8wWThc1', '8wWThc2',
'8wWThc3', '8wWTst1', '8wWTst2', '8wWTst3', '8wWT1', '8wWT2',
'8wWT3']), 'Age_days'] = 56 + 21 # 56 +21 #'8 weeks'
hosp_3.loc[
hosp_3['sample_id'].isin(['12wWTce1', '12wWTce2', '12wWTce3', '12wWTco1', '12wWTco2', '12wWTco3', '12wWThc1',
'12wWThc2', '12wWThc3', '12wWTst1', '12wWTst2', '12wWTst3', '12wWT1', '12wWT2',
'12wWT3']), 'Age_days'] = 12 * 7 + 21 # 12*7 +21 #'12 weeks'
hosp_3.loc[hosp_3['sample_id'].isin(['5wWTce1', '5wWTce2', '5wWTce3', '5wWTce4', '8wWTce1', '8wWTce2',
'8wWTce3', '12wWTce1', '12wWTce2',
'12wWTce3']), 'location'] = 'cerebellum'
hosp_3.loc[hosp_3['sample_id'].isin(['5wWTco1', '5wWTco2', '5wWTco3', '5wWTco4', '8wWTco1', '8wWTco2', '8wWTco3',
'12wWTco1', '12wWTco2', '12wWTco3']), 'location'] = 'cortex'
hosp_3.loc[hosp_3['sample_id'].isin(['5wWThc1', '5wWThc2', '5wWThc3', '5wWThc4',
'8wWThc1', '8wWThc2', '8wWThc3', '12wWThc1', '12wWThc2',
'12wWThc3']), 'location'] = 'hippocampus'
hosp_3.loc[hosp_3['sample_id'].isin(
['5wWTst1', '5wWTst2', '5wWTst3', '5wWTst4', '8wWTst1', '8wWTst2', '8wWTst3', '12wWTst1', '12wWTst2',
'12wWTst3']), 'location'] = 'striatum'
hosp_3.loc[hosp_3['sample_id'].isin(
['5wWT1', '5wWT2', '5wWT3', '8wWT1', '8wWT2', '8wWT3', '12wWT1', '12wWT2', '12wWT3']), 'location'] = 'csf'
return hosp_3
def get_itzhak_2017_dataframe():
"""
Return pandas dataframe for Itzhak 2017
:return:
pandas.core.frame.DataFrame: dataframe containing Itzhak 2017 data.
"""
print("Importing Itzhak 2017 pandas dataframe. This can take a while.")
itzhak_concf = | pd.ExcelFile('../data/source_data/1-s2.0-S2211124717311889-mmc4.xlsx') | pandas.ExcelFile |
"""A script for running an already trained SSD model on a video, saving the result as
both a video file which can be inspected by humans, and also as a text file.
Only uses imageio for video input/output which is nice for when OpenCV
is built without video encoding support. """
import cv2
import imageio as io
import numpy as np
import pandas as pd
import click
from visualize import draw, class_colors
from ssd import SSD300
from create_prior_box import create_prior_box
from ssd_utils import BBoxUtility
from keras.applications.imagenet_utils import preprocess_input
from apply_mask import Masker
from classnames import get_classnames
from util import parse_resolution, print_flush
from folder import runs_path
def rescale(df, index, factor):
""" Rescales a data frame column to integers. Used since detections are stored on a 0-1 scale """
s = df[index]
s2 = [int(factor*x) for x in s]
df[index] = s2
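# Illustrative usage sketch: map 0-1 detections onto an assumed 640x480 frame.
#   rescale(detections, 'xmin', 640)
#   rescale(detections, 'xmax', 640)
#   rescale(detections, 'ymin', 480)
#   rescale(detections, 'ymax', 480)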
def get_model(name, experiment, input_shape, num_classes=6, verbose=True):
""" Gets an SSD model, with trained weights
Arguments:
name -- name of the dataset
experiment -- name of this training run
input_shape -- size of images fed to SSD as a tuple like (640,480,3)
num_classes -- the number of different object classes (including background)
"""
model = SSD300((input_shape[1],input_shape[0],input_shape[2]), num_classes=num_classes)
weights_files = list((runs_path / "{}_{}".format(name,experiment) / "checkpoints").glob('*.hdf5'))
weights_files_loss = np.array([float(wf.stem.split('-')[-1]) for wf in weights_files])
weights_file = weights_files[np.argmin(weights_files_loss)]
model.load_weights(weights_file, by_name=True)
if verbose:
print_flush('Model loaded from {}'.format(weights_file))
return model
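# Illustrative usage sketch; 'mydataset' and 'run1' are placeholder names for a dataset
# and training run that must already exist under runs_path.
#   model = get_model('mydataset', 'run1', input_shape=(640, 480, 3))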
def test_on_video(model, name, experiment, videopath, outvideopath, classnames, batch_size=32, input_shape=(480,640,3), soft=False, width=480, height=640, conf_thresh=0.75, csv_conf_thresh=0.75):
""" Applies a trained SSD model to a video
Arguments:
model -- the SSD model, e.g. from get_model
name -- name of dataset
experiment -- name of training run
videopath -- path to input video
outvideopath -- path to output video showing the detections
classnames -- list of all the classes
        batch_size -- number of images processed in parallel; lower this if you get out-of-memory errors
input_shape -- size of images fed to SSD
soft -- Whether to do soft NMS or normal NMS
width -- Width to scale detections with (can be set to 1 if detections are already on right scale)
height -- Height to scale detections with (can be set to 1 if detections are already on right scale)
conf_thresh -- Detections with confidences below this are not shown in output video. Set to negative to not visualize confidences.
        csv_conf_thresh -- Detections with confidences below this are ignored. This should be the same as conf_thresh unless conf_thresh is negative.
"""
masker = Masker(name)
num_classes = len(classnames)+1
colors = class_colors(num_classes)
make_vid = True
suffix = outvideopath.split('.')[-1]
if suffix == 'csv':
make_vid = False
csvpath = outvideopath
else:
csvpath = outvideopath.replace('.{}'.format(suffix), '.csv')
print_flush('Generating priors')
im_in = np.random.random((1,input_shape[1],input_shape[0],input_shape[2]))
priors = model.predict(im_in,batch_size=1)[0, :, -8:]
bbox_util = BBoxUtility(num_classes, priors)
vid = io.get_reader(videopath)
if make_vid:
outvid = io.get_writer(outvideopath, fps=30)
inputs = []
frames = []
all_detections = []
for i,frame in enumerate(vid):
frame = masker.mask(frame)
resized = cv2.resize(frame, (input_shape[0], input_shape[1]))
frames.append(frame.copy())
inputs.append(resized)
if len(inputs) == batch_size:
inputs = np.array(inputs).astype(np.float64)
inputs = preprocess_input(inputs)
preds = model.predict(inputs, batch_size=batch_size, verbose=0)
results = bbox_util.detection_out(preds, soft=soft)
for result, frame, frame_number in zip(results, frames, range(i-batch_size, i)):
result = [r if len(r) > 0 else np.zeros((1, 6)) for r in result]
raw_detections = pd.DataFrame(np.vstack(result), columns=['class_index', 'confidence', 'xmin', 'ymin', 'xmax', 'ymax'])
rescale(raw_detections, 'xmin', width)
rescale(raw_detections, 'xmax', width)
rescale(raw_detections, 'ymin', height)
rescale(raw_detections, 'ymax', height)
rescale(raw_detections, 'class_index', 1)
ci = raw_detections['class_index']
cn = [classnames[int(x)-1] for x in ci]
raw_detections['class_name'] = cn
raw_detections['frame_number'] = (frame_number+2)
all_detections.append(raw_detections[raw_detections.confidence>csv_conf_thresh])
if make_vid:
frame = draw(frame, raw_detections, colors, conf_thresh=conf_thresh)
outvid.append_data(frame)
frames = []
inputs = []
if i%(10*batch_size) == 0:
print_flush(i)
detections = | pd.concat(all_detections) | pandas.concat |
"""Run unit tests.
Use this to run tests and understand how tasks.py works.
Example:
Create directories::
mkdir -p test-data/input
mkdir -p test-data/output
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
        * test_combine_() tests the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import ntpath
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class TestLogPusher(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = TestLogPusher('combiner')
# sample data
def create_files_df_clean():
# create sample data
df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
return df1, df2, df3
def create_files_df_clean_combine():
df1,df2,df3 = create_files_df_clean()
df_all = pd.concat([df1,df2,df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_clean_combine_with_filename(fname_list):
df1, df2, df3 = create_files_df_clean()
df1['filename'] = os.path.basename(fname_list[0])
df2['filename'] = os.path.basename(fname_list[1])
df3['filename'] = os.path.basename(fname_list[2])
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine2(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
# csv standard
@pytest.fixture(scope="module")
def create_files_csv():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-clean-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch():
df1,df2,df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch2():
df1,df2,df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch2-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colreorder():
df1,df2,df3 = create_files_df_clean()
cfg_col = [ 'date', 'sales','cost','profit']
cfg_col2 = [ 'date', 'sales','profit','cost']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
# save files
cfg_fname = cfg_fname_base_in+'input-csv-reorder-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan',index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb',index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_noheader():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-noheader-csv-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False, header=False)
df2.to_csv(cfg_fname % 'feb',index=False, header=False)
df3.to_csv(cfg_fname % 'mar',index=False, header=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_col_renamed():
df1, df2, df3 = create_files_df_clean()
df3 = df3.rename(columns={'sales':'revenue'})
cfg_col = ['date', 'sales', 'profit', 'cost']
cfg_col2 = ['date', 'revenue', 'profit', 'cost']
cfg_fname = cfg_fname_base_in + 'input-csv-renamed-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan', index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb', index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar', index=False)
return [cfg_fname % 'jan', cfg_fname % 'feb', cfg_fname % 'mar']
def test_create_files_csv_col_renamed(create_files_csv_col_renamed):
pass
def create_files_csv_dirty(cfg_sep=",", cfg_header=True):
df1,df2,df3 = create_files_df_clean()
df1.to_csv(cfg_fname_base_in+'debug.csv',index=False, sep=cfg_sep, header=cfg_header)
return cfg_fname_base_in+'debug.csv'
# excel single-tab
def create_files_xls_single_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
df1.to_excel(cfg_fname % 'jan',index=False)
df2.to_excel(cfg_fname % 'feb',index=False)
df3.to_excel(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx')
def write_file_xls(dfg, fname, startrow=0,startcol=0):
writer = pd.ExcelWriter(fname)
dfg.to_excel(writer, 'Sheet1', index=False,startrow=startrow,startcol=startcol)
dfg.to_excel(writer, 'Sheet2', index=False,startrow=startrow,startcol=startcol)
writer.save()
# excel multi-tab
def create_files_xls_multiple_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
write_file_xls(df1,cfg_fname % 'jan')
write_file_xls(df2,cfg_fname % 'feb')
write_file_xls(df3,cfg_fname % 'mar')
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx')
#************************************************************
# tests - helpers
#************************************************************
def test_file_extensions_get():
fname_list = ['a.csv','b.csv']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.csv','.csv']
fname_list = ['a.xls','b.xls']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.xls','.xls']
def test_file_extensions_all_equal():
ext_list = ['.csv']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.xls']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.csv','.xls']
assert not file_extensions_all_equal(ext_list)
def test_file_extensions_valid():
ext_list = ['.csv']*2
assert file_extensions_valid(ext_list)
ext_list = ['.xls']*2
assert file_extensions_valid(ext_list)
ext_list = ['.exe','.xls']
assert not file_extensions_valid(ext_list)
#************************************************************
#************************************************************
# combine_csv
#************************************************************
#************************************************************
def test_csv_sniff_single(create_files_csv, create_files_csv_noheader):
sniff = CSVSniffer(create_files_csv[0])
sniff.get_delim()
assert sniff.delim == ','
assert sniff.count_skiprows() == 0
assert sniff.has_header()
fname = create_files_csv_dirty("|")
sniff = CSVSniffer(fname)
sniff.get_delim()
assert sniff.delim == "|"
assert sniff.has_header()
df1,df2,df3 = create_files_df_clean()
assert sniff.nrows == df1.shape[0]+1
# no header test
sniff = CSVSniffer(create_files_csv_noheader[0])
sniff.get_delim()
assert sniff.delim == ','
assert sniff.count_skiprows() == 0
assert not sniff.has_header()
def test_csv_sniff_multi(create_files_csv, create_files_csv_noheader):
sniff = CSVSnifferList(create_files_csv)
assert sniff.get_delim() == ','
assert sniff.count_skiprows() == 0
assert sniff.has_header()
# no header test
sniff = CSVSnifferList(create_files_csv_noheader)
sniff.get_delim()
assert sniff.get_delim() == ','
assert sniff.count_skiprows() == 0
assert not sniff.has_header()
def test_CombinerCSV_columns(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
with pytest.raises(ValueError) as e:
c = CombinerCSV([])
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
# todo: cache the preview dfs somehow? reading the same in next step
assert col_preview['is_all_equal']
assert col_preview['columns_all']==col_preview['columns_common']
assert col_preview['columns_all']==['cost', 'date', 'profit', 'sales']
fname_list = create_files_csv_colmismatch
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
# todo: cache the preview dfs somehow? reading the same in next step
assert not col_preview['is_all_equal']
assert not col_preview['columns_all']==col_preview['columns_common']
assert col_preview['columns_all']==['cost', 'date', 'profit', 'profit2', 'sales']
assert col_preview['columns_common']==['cost', 'date', 'profit', 'sales']
assert col_preview['columns_unique']==['profit2']
fname_list = create_files_csv_colreorder
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
assert not col_preview['is_all_equal']
assert col_preview['columns_all']==col_preview['columns_common']
def test_CombinerCSV_combine(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
# all columns present
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
df = combiner.combine()
df = df.sort_values('date').drop(['filename'],axis=1)
df_chk = create_files_df_clean_combine()
assert df.equals(df_chk)
df = combiner.combine()
df = df.groupby('filename').head(combiner.nrows_preview)
df_chk = combiner.preview_combine()
assert df.equals(df_chk)
# columns mismatch, all columns
fname_list = create_files_csv_colmismatch
combiner = CombinerCSV(fname_list=fname_list, all_strings=True, add_filename=True)
df = combiner.combine()
df = df.sort_values('date').drop(['filename'],axis=1)
df_chk = create_files_df_colmismatch_combine(cfg_col_common=False)
assert df.shape[1] == df_chk.shape[1]
# columns mismatch, common columns
df = combiner.combine(is_col_common=True)
df = df.sort_values('date').drop(['filename'], axis=1)
df_chk = create_files_df_colmismatch_combine(cfg_col_common=True)
assert df.shape[1] == df_chk.shape[1]
# Filename column True
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
df = combiner.combine()
df = df.sort_values('date')
df_chk = create_files_df_clean_combine_with_filename(fname_list)
assert df.equals(df_chk)
# Filename column False
combiner = CombinerCSV(fname_list=fname_list, all_strings=True, add_filename=False)
df = combiner.combine()
df = df.sort_values('date')
df_chk = create_files_df_clean_combine()
assert df.equals(df_chk)
def test_CombinerCSV_combine_advanced(create_files_csv):
# Check if rename worked correctly.
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
adv_combiner = CombinerCSV(fname_list=fname_list, all_strings=True,
columns_select=None, columns_rename={'date':'date1'})
df = adv_combiner.combine()
assert 'date1' in df.columns.values
assert 'date' not in df.columns.values
df = adv_combiner.preview_combine()
assert 'date1' in df.columns.values
assert 'date' not in df.columns.values
adv_combiner = CombinerCSV(fname_list=fname_list, all_strings=True,
columns_select=['cost', 'date', 'profit', 'profit2', 'sales'])
df = adv_combiner.combine()
assert 'profit2' in df.columns.values
assert df['profit2'].isnull().all()
df = adv_combiner.preview_combine()
assert 'profit2' in df.columns.values
assert df['profit2'].isnull().all()
def test_preview_dict():
df = pd.DataFrame({'col1':[0,1],'col2':[0,1]})
assert preview_dict(df) == {'columns': ['col1', 'col2'], 'rows': {0: [[0]], 1: [[1]]}}
#************************************************************
# tests - CombinerCSV rename and select columns
#************************************************************
def create_df_rename():
df11 = pd.DataFrame({'a':range(10)})
df12 = pd.DataFrame({'b': range(10)})
df21 = pd.DataFrame({'a':range(10),'c': range(10)})
df22 = pd.DataFrame({'b': range(10),'c': range(10)})
return df11, df12, df21, df22
# csv standard
@pytest.fixture(scope="module")
def create_files_csv_rename():
df11, df12, df21, df22 = create_df_rename()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-rename-%s.csv'
df11.to_csv(cfg_fname % '11',index=False)
df12.to_csv(cfg_fname % '12',index=False)
df21.to_csv(cfg_fname % '21',index=False)
df22.to_csv(cfg_fname % '22',index=False)
return [cfg_fname % '11',cfg_fname % '12',cfg_fname % '21',cfg_fname % '22']
def test_create_files_csv_rename(create_files_csv_rename):
pass
@pytest.fixture(scope="module")
def create_out_files_csv_align_save():
cfg_outname = cfg_fname_base_out + 'input-csv-rename-%s-align-save.csv'
return [cfg_outname % '11', cfg_outname % '12',cfg_outname % '21',cfg_outname % '22']
@pytest.fixture(scope="module")
def create_out_files_parquet_align_save():
cfg_outname = cfg_fname_base_out + 'input-csv-rename-%s-align-save.parquet'
return [cfg_outname % '11', cfg_outname % '12',cfg_outname % '21',cfg_outname % '22']
def test_apply_select_rename():
df11, df12, df21, df22 = create_df_rename()
# rename 1, select all
assert df11.equals(apply_select_rename(df12.copy(),[],{'b':'a'}))
# rename and select 1
assert df11.equals(apply_select_rename(df22.copy(),['b'],{'b':'a'}))
assert df11.equals(apply_select_rename(df22.copy(),['a'],{'b':'a'}))
# rename and select 2
assert df21[list(dict.fromkeys(df21.columns))].equals(apply_select_rename(df22.copy(),['b','c'],{'b':'a'}))
assert df21[list(dict.fromkeys(df21.columns))].equals(apply_select_rename(df22.copy(),['a','c'],{'b':'a'}))
with pytest.warns(UserWarning, match="Renaming conflict"):
assert df22.equals(apply_select_rename(df22.copy(), ['b', 'c'], {'c': 'b'}))
def test_CombinerCSV_rename(create_files_csv_rename):
df11, df12, df21, df22 = create_df_rename()
df_chk1 = pd.concat([df11,df11])
df_chk2 = pd.concat([df11,df21])
def helper(fnames, cfg_col_sel,cfg_col_rename, df_chk, chk_filename=False, is_filename_col=True):
if cfg_col_sel and cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col,
columns_select=cfg_col_sel, columns_rename=cfg_col_rename)
elif cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col, columns_rename=cfg_col_rename)
else:
c2 = CombinerCSV(fnames, add_filename=is_filename_col)
dfc = c2.combine()
if (not chk_filename) and is_filename_col:
dfc = dfc.drop(['filename'], 1)
assert dfc.equals(df_chk)
if cfg_col_sel:
fname_out = cfg_fname_base_out_dir + '/test_save.csv'
c2.combine_save(fname_out)
dfc = pd.read_csv(fname_out)
if (not chk_filename) or is_filename_col:
dfc = dfc.drop(['filename'], 1)
assert dfc.equals(df_chk.reset_index(drop=True))
# rename 1, select all
l = create_files_csv_rename[:2]
helper(l,None,{'b':'a'},df_chk1)
with pytest.raises(ValueError) as e:
c2 = CombinerCSV(l, columns_select=['a','a'])
# rename 1, select some
l = [create_files_csv_rename[0],create_files_csv_rename[-1]]
helper(l,['a'],{'b':'a'},df_chk1)
helper(l,['b'],{'b':'a'},df_chk1)
helper(l,None,{'b':'a'},df_chk2)
l = [create_files_csv_rename[1],create_files_csv_rename[-1]]
helper(l,['a'],{'b':'a'},df_chk1)
helper(l,['b'],{'b':'a'},df_chk1)
helper(l,None,{'b':'a'},df_chk2)
with pytest.warns(UserWarning, match="Renaming conflict"):
c2 = CombinerCSV(l, columns_rename={'b': 'a', 'c': 'a'})
c2.combine()
# rename none, select all
l = [create_files_csv_rename[0],create_files_csv_rename[2]]
helper(l,None,None,df_chk2)
# filename col True
df31 = df11
df32 = df21
df31['filename'] = os.path.basename(l[0])
df32['filename'] = os.path.basename(l[1])
df_chk3 = pd.concat([df31, df32])
helper(l, None, None, df_chk3, is_filename_col=True, chk_filename=True)
helper(l, None, None, df_chk2, is_filename_col=False, chk_filename=True)
def test_CombinerCSV_align_save_advanced(create_files_csv_rename, create_out_files_csv_align_save):
df11, df12, df21, df22 = create_df_rename()
def helper(fnames, cfg_col_sel, cfg_col_rename, new_fnames, df_chks, is_filename_col=False):
if cfg_col_sel and cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col,
columns_select=cfg_col_sel, columns_rename=cfg_col_rename)
elif cfg_col_sel:
c2 = CombinerCSV(fnames, add_filename=is_filename_col, columns_select=cfg_col_sel)
elif cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col, columns_rename=cfg_col_rename)
else:
c2 = CombinerCSV(fnames, add_filename=is_filename_col)
c2.align_save(output_dir=cfg_fname_base_out_dir, prefix="-align-save")
for fname_out, df_chk in zip(new_fnames, df_chks):
dfc = pd.read_csv(fname_out)
assert dfc.equals(df_chk)
# rename 1, select all
l = create_files_csv_rename[:2]
outl = create_out_files_csv_align_save[:2]
helper(l, ['a'], {'b':'a'}, outl, [df11, df11])
# rename 1, select some
l = [create_files_csv_rename[2]]
outl = [create_out_files_csv_align_save[2]]
helper(l, ['a'], {'b':'a'}, outl, [df11])
# rename none, select 1
l = [create_files_csv_rename[2]]
outl = [create_out_files_csv_align_save[2]]
helper(l, ['a'], None, outl, [df11])
# rename none, select all
l = [create_files_csv_rename[2]]
outl = [create_out_files_csv_align_save[2]]
helper(l, ['a', 'c'], None, outl, [df21])
# rename none, select all, filename col true
df21['filename'] = os.path.basename(outl[0])
helper(l, ['a', 'c'], None, outl, [df21], is_filename_col=True)
def test_CombinerCSV_sql_advanced(create_files_csv_rename):
df11, df12, df21, df22 = create_df_rename()
def helper(fnames, cfg_col_sel, cfg_col_rename, df_chks, is_filename_col=False, stream=False):
if cfg_col_sel and cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col,
columns_select=cfg_col_sel, columns_rename=cfg_col_rename)
elif cfg_col_sel:
c2 = CombinerCSV(fnames, add_filename=is_filename_col, columns_select=cfg_col_sel)
elif cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col, columns_rename=cfg_col_rename)
else:
c2 = CombinerCSV(fnames, add_filename=is_filename_col)
df_chk = pd.DataFrame()
for df in df_chks:
df_chk = df_chk.append(df)
table_name = 'test'
db_cnxn_string = cnxn_string.format('test-combined-adv')
if stream:
c2.to_sql_stream(db_cnxn_string, table_name)
else:
c2.to_sql(db_cnxn_string, table_name)
dfc = pd.read_sql("select * from test", db_cnxn_string)
dfc = dfc.set_index('index')
dfc.index.name = None
pd.testing.assert_frame_equal(dfc, df_chk)
assert dfc.equals(df_chk)
# rename 1, select all
l = create_files_csv_rename[:2]
helper(l, ['a'], {'b': 'a'}, [df11, df11], stream=True)
# test sql stream
helper(l, ['a'], {'b': 'a'}, [df11, df11])
# rename 1, select some
l = [create_files_csv_rename[2]]
helper(l, ['a'], {'b': 'a'}, [df11])
# rename none, select 1
l = [create_files_csv_rename[2]]
helper(l, ['a'], None, [df11])
# rename none, select all
l = [create_files_csv_rename[2]]
helper(l, ['a', 'c'], None, [df21])
# rename none, select all, filename col true
df21['filename'] = os.path.basename(l[0])
helper(l, ['a', 'c'], None, [df21], is_filename_col=True)
def test_CombinerCSV_sql(create_files_csv, create_files_csv_colmismatch):
def helper(fnames, is_col_common=False, is_filename_col=False, stream=False):
combiner = CombinerCSV(fname_list=fnames, all_strings=True, add_filename=is_filename_col)
table_name = 'test'
db_cnxn_string = cnxn_string.format('test-combined-adv')
if stream:
combiner.to_sql_stream(db_cnxn_string, table_name, is_col_common=is_col_common)
else:
combiner.to_sql(db_cnxn_string, table_name, is_col_common=is_col_common)
df = pd.read_sql("select * from test", db_cnxn_string)
df = df.set_index('index')
df.index.name = None
return df
# all columns present, to_sql
fname_list = create_files_csv
df_chk = create_files_df_clean_combine()
assert df_chk.equals(helper(fname_list))
# to sql stream
assert df_chk.equals(helper(fname_list, stream=True))
# columns mismatch, common columns, to_sql
    fname_list = create_files_csv_colmismatch
df_chk = create_files_df_colmismatch_combine(cfg_col_common=True)
assert helper(fname_list, is_col_common=True).shape[1] == df_chk.shape[1]
def test_combinercsv_to_csv(create_files_csv_rename, create_out_files_csv_align_save):
fnames = create_files_csv_rename
df11, df12, df21, df22 = create_df_rename()
# error when separate files is False and no out_filename
with pytest.raises(ValueError):
c = CombinerCSV(fnames)
c.to_csv(separate_files=False)
# to_csv will call align_save
fnames = create_files_csv_rename[:2]
new_names = create_out_files_csv_align_save[:2]
c2 = CombinerCSV(fnames, columns_select=['a'],
columns_rename={'b': 'a'}, add_filename=False)
c2.to_csv(output_dir=cfg_fname_base_out_dir, prefix="-align-save")
df_chks = [df11, df11]
for fname_out, df_chk in zip(new_names, df_chks):
dfc = pd.read_csv(fname_out)
assert dfc.equals(df_chk)
# to_csv will call combine_save
df_chk = pd.concat([df11, df11])
fnames = [create_files_csv_rename[0], create_files_csv_rename[-1]]
c3 = CombinerCSV(fnames, columns_select=['a'], columns_rename={'b': 'a'})
dfc = c3.combine()
dfc = dfc.drop(['filename'], 1)
assert dfc.equals(df_chk)
fname_out = cfg_fname_base_out_dir + '/test_save.csv'
with pytest.warns(UserWarning, match="File already exists"):
c3.to_csv(out_filename=fname_out, separate_files=False, streaming=True, overwrite=False)
c3.to_csv(out_filename=fname_out, separate_files=False, streaming=True)
dfc = pd.read_csv(fname_out)
dfc = dfc.drop(['filename'], 1)
assert dfc.equals(df_chk.reset_index(drop=True))
def test_combinercsv_to_parquet(create_files_csv_rename, create_out_files_parquet_align_save):
fnames = create_files_csv_rename
df11, df12, df21, df22 = create_df_rename()
# error when separate files is False and no out_filename
with pytest.raises(ValueError):
c = CombinerCSV(fnames)
c.to_parquet(separate_files=False)
# to_csv will call align_save
fnames = create_files_csv_rename[:2]
new_names = create_out_files_parquet_align_save[:2]
c2 = CombinerCSV(fnames, columns_select=['a'],
columns_rename={'b': 'a'}, add_filename=False)
c2.to_parquet(output_dir=cfg_fname_base_out_dir, prefix="-align-save")
df_chks = [df11, df11]
for fname_out, df_chk in zip(new_names, df_chks):
table = pq.read_table(fname_out)
dfc = table.to_pandas()
assert dfc.equals(df_chk)
# to_csv will call combine_save
df_chk = | pd.concat([df11, df11]) | pandas.concat |
""" Functions to generate the set of endpoints for the time series
benchmark on the HiRID database"""
import glob
import logging
import math
import os
import os.path
import pickle
import random
import sys
import lightgbm as lgbm
import numpy as np
import pandas as pd
import skfda.preprocessing.smoothing.kernel_smoothers as skks
import skfda.representation.grid as skgrid
import sklearn.linear_model as sklm
import sklearn.metrics as skmetrics
import sklearn.preprocessing as skpproc
def load_pickle(fpath):
""" Given a file path pointing to a pickle file, yields the object pickled in this file"""
with open(fpath, 'rb') as fp:
return pickle.load(fp)
SUPPOX_TO_FIO2 = {
0: 21,
1: 26,
2: 34,
3: 39,
4: 45,
5: 49,
6: 54,
7: 57,
8: 58,
9: 63,
10: 66,
11: 67,
12: 69,
13: 70,
14: 73,
15: 75}
def mix_real_est_pao2(pao2_col, pao2_meas_cnt, pao2_est_arr, bandwidth=None):
""" Mix real PaO2 measurement and PaO2 estimates using a Gaussian kernel"""
final_pao2_arr = np.copy(pao2_est_arr)
sq_scale = 57 ** 2 # 1 hour has mass 1/3 approximately
for idx in range(final_pao2_arr.size):
meas_ref = pao2_meas_cnt[idx]
real_val = None
real_val_dist = None
        # Search forward and backward, with priority given to backward if equi-distant
for sidx in range(48):
if not idx - sidx < 0 and pao2_meas_cnt[idx - sidx] < meas_ref:
real_val = pao2_col[idx - sidx + 1]
real_val_dist = 5 * sidx
break
elif not idx + sidx >= final_pao2_arr.size and pao2_meas_cnt[idx + sidx] > meas_ref:
real_val = pao2_col[idx + sidx]
real_val_dist = 5 * sidx
break
if real_val is not None:
alpha_mj = math.exp(-real_val_dist ** 2 / sq_scale)
alpha_ej = 1 - alpha_mj
final_pao2_arr[idx] = alpha_mj * real_val + alpha_ej * pao2_est_arr[idx]
return final_pao2_arr
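# Weighting sketch: with sq_scale = 57**2, a real PaO2 measurement lying 60
# minutes away gets weight exp(-60**2 / 57**2) ~= 0.33 (the "1 hour has mass
# 1/3" rule of thumb above), and the kernel estimate keeps the remaining ~0.67.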
def perf_regression_model(X_list, y_list, aux_list, configs=None):
""" Initial test of a regression model to estimate the current Pao2 based
on 6 features of the past. Also pass FiO2 to calculate resulting mistakes in
the P/F ratio"""
logging.info("Testing regression model for PaO2...")
# Partition the data into 3 sets and run SGD regressor
X_train = X_list[:int(0.6 * len(X_list))]
X_train = np.vstack(X_train)
y_train = np.concatenate(y_list[:int(0.6 * len(y_list))])
X_val = X_list[int(0.6 * len(X_list)):int(0.8 * len(X_list))]
X_val = np.vstack(X_val)
y_val = np.concatenate(y_list[int(0.6 * len(y_list)):int(0.8 * len(y_list))])
X_test = X_list[int(0.8 * len(X_list)):]
X_test = np.vstack(X_test)
y_test = np.concatenate(y_list[int(0.8 * len(y_list)):])
fio2_test = np.concatenate(aux_list[int(0.8 * len(aux_list)):])
if configs["sur_model_type"] == "linear":
scaler = skpproc.StandardScaler()
X_train_std = scaler.fit_transform(X_train)
X_val_std = scaler.transform(X_val)
X_test_std = scaler.transform(X_test)
if configs["sur_model_type"] == "linear":
alpha_cands = [0.0001, 0.001, 0.01, 0.1, 1.0]
elif configs["sur_model_type"] == "lgbm":
alpha_cands = [32]
best_alpha = None
best_score = np.inf
# Search for the best model on the validation set
for alpha in alpha_cands:
logging.info("Testing alpha: {}".format(alpha))
if configs["sur_model_type"] == "linear":
lmodel_cand = sklm.SGDRegressor(alpha=alpha, random_state=2021)
elif configs["sur_model_type"] == "lgbm":
lmodel_cand = lgbm.LGBMRegressor(num_leaves=alpha, learning_rate=0.05, n_estimators=1000,
random_state=2021)
if configs["sur_model_type"] == "linear":
lmodel_cand.fit(X_train_std, y_train)
elif configs["sur_model_type"] == "lgbm":
lmodel_cand.fit(X_train_std, y_train, eval_set=(X_val_std, y_val), early_stopping_rounds=20,
eval_metric="mae")
pred_y_val = lmodel_cand.predict(X_val_std)
mae_val = np.median(np.absolute(y_val - pred_y_val))
if mae_val < best_score:
best_score = mae_val
best_alpha = alpha
    # Refit the best configuration on the training set before evaluating on the test set
    if configs["sur_model_type"] == "lgbm":
        lmodel = lgbm.LGBMRegressor(num_leaves=best_alpha, learning_rate=0.05, n_estimators=1000, random_state=2021)
    else:
        lmodel = sklm.SGDRegressor(alpha=best_alpha, random_state=2021)
    lmodel.fit(X_train_std, y_train)
pred_y_test = lmodel.predict(X_test_std)
pred_pf_ratio_test = pred_y_test / fio2_test
true_pf_ratio_test = y_test / fio2_test
mae_test = skmetrics.mean_absolute_error(y_test, pred_y_test)
logging.info("Mean absolute error in test set: {:.3f}".format(mae_test))
def percentile_smooth(signal_col, percentile, win_scope_mins):
""" Window percentile smoother, where percentile is in the interval [0,100]"""
out_arr = np.zeros_like(signal_col)
mins_per_window = 5
search_range = int(win_scope_mins / mins_per_window / 2)
for jdx in range(out_arr.size):
search_arr = signal_col[max(0, jdx - search_range):min(out_arr.size, jdx + search_range)]
out_arr[jdx] = np.percentile(search_arr, percentile)
return out_arr
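# Usage sketch (hypothetical parameter values): a 75th-percentile smoother over
# a 30-minute window, i.e. 3 grid points on each side of the 5-minute grid.
#
#   spo2_smoothed = percentile_smooth(spo2_col, 75, win_scope_mins=30)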
def subsample_blocked(val_arr, meas_arr=None, ss_ratio=None, block_length=None, normal_value=None):
""" Subsample blocked with ratio and block length"""
val_arr_out = np.copy(val_arr)
meas_arr_out = np.copy(meas_arr)
meas_idxs = []
n_meas = 0
for idx in range(meas_arr.size):
if meas_arr[idx] > n_meas:
meas_idxs.append(idx)
n_meas += 1
if len(meas_idxs) == 0:
return (val_arr_out, meas_arr_out)
meas_select = int((1 - ss_ratio) * len(meas_idxs))
begin_select = meas_select // block_length
feas_begins = [meas_idxs[idx] for idx in np.arange(0, len(meas_idxs), block_length)]
sel_meas_begins = sorted(random.sample(feas_begins, begin_select))
sel_meas_delete = []
for begin in sel_meas_begins:
for add_idx in range(block_length):
sel_meas_delete.append(begin + add_idx)
    # Rewrite the measurement array with deleted indices
for midx, meas_idx in enumerate(meas_idxs):
prev_cnt = 0 if meas_idx == 0 else meas_arr_out[meas_idx - 1]
revised_cnt = prev_cnt if meas_idx in sel_meas_delete else prev_cnt + 1
if midx < len(meas_idxs) - 1:
for rewrite_idx in range(meas_idx, meas_idxs[midx + 1]):
meas_arr_out[rewrite_idx] = revised_cnt
else:
for rewrite_idx in range(meas_idx, len(meas_arr_out)):
meas_arr_out[rewrite_idx] = revised_cnt
    # Rewrite the value array with deleted indices, assuming forward filling
for midx, meas_idx in enumerate(meas_idxs):
prev_val = normal_value if meas_idx == 0 else val_arr_out[meas_idx - 1]
cur_val = val_arr_out[meas_idx]
revised_value = prev_val if meas_idx in sel_meas_delete else cur_val
if midx < len(meas_idxs) - 1:
for rewrite_idx in range(meas_idx, meas_idxs[midx + 1]):
val_arr_out[rewrite_idx] = revised_value
else:
for rewrite_idx in range(meas_idx, len(meas_arr_out)):
val_arr_out[rewrite_idx] = revised_value
return (val_arr_out, meas_arr_out)
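# Behaviour sketch: a fraction (1 - ss_ratio) of the recorded measurements is
# targeted for removal, grouped into blocks of block_length consecutive grid
# steps; values are forward-filled across removed measurements (normal_value is
# used only if the very first measurement on the grid is removed).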
def subsample_individual(val_arr, meas_arr=None, ss_ratio=None, normal_value=None):
""" Subsample individual measurements completely randomly with random choice"""
val_arr_out = np.copy(val_arr)
meas_arr_out = np.copy(meas_arr)
meas_idxs = []
n_meas = 0
for idx in range(meas_arr.size):
if meas_arr[idx] > n_meas:
meas_idxs.append(idx)
n_meas += 1
if len(meas_idxs) == 0:
return (val_arr_out, meas_arr_out)
meas_select = int((1 - ss_ratio) * len(meas_idxs))
sel_meas_delete = sorted(random.sample(meas_idxs, meas_select))
    # Rewrite the measurement array with deleted indices
for midx, meas_idx in enumerate(meas_idxs):
prev_cnt = 0 if meas_idx == 0 else meas_arr_out[meas_idx - 1]
revised_cnt = prev_cnt if meas_idx in sel_meas_delete else prev_cnt + 1
if midx < len(meas_idxs) - 1:
for rewrite_idx in range(meas_idx, meas_idxs[midx + 1]):
meas_arr_out[rewrite_idx] = revised_cnt
else:
for rewrite_idx in range(meas_idx, len(meas_arr_out)):
meas_arr_out[rewrite_idx] = revised_cnt
    # Rewrite the value array with deleted indices, assuming forward filling
for midx, meas_idx in enumerate(meas_idxs):
prev_val = normal_value if meas_idx == 0 else val_arr_out[meas_idx - 1]
cur_val = val_arr_out[meas_idx]
revised_value = prev_val if meas_idx in sel_meas_delete else cur_val
if midx < len(meas_idxs) - 1:
for rewrite_idx in range(meas_idx, meas_idxs[midx + 1]):
val_arr_out[rewrite_idx] = revised_value
else:
for rewrite_idx in range(meas_idx, len(meas_arr_out)):
val_arr_out[rewrite_idx] = revised_value
return (val_arr_out, meas_arr_out)
def merge_short_vent_gaps(vent_status_arr, short_gap_hours):
""" Merge short gaps in the ventilation status array"""
in_gap = False
gap_length = 0
before_gap_status = np.nan
for idx in range(len(vent_status_arr)):
cur_state = vent_status_arr[idx]
if in_gap and (cur_state == 0.0 or np.isnan(cur_state)):
gap_length += 5
elif not in_gap and (cur_state == 0.0 or np.isnan(cur_state)):
if idx > 0:
before_gap_status = vent_status_arr[idx - 1]
in_gap = True
in_gap_idx = idx
gap_length = 5
elif in_gap and cur_state == 1.0:
in_gap = False
after_gap_status = cur_state
if gap_length / 60. <= short_gap_hours:
vent_status_arr[in_gap_idx:idx] = 1.0
return vent_status_arr
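# Example: with short_gap_hours=1, a status trace of [1, 1, 0, 0, 1] on the
# 5-minute grid contains a 10-minute gap and is merged to [1, 1, 1, 1, 1];
# gaps longer than one hour are left untouched.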
def kernel_smooth_arr(input_arr, bandwidth=None):
""" Kernel smooth an input array with a Nadaraya-Watson kernel smoother"""
output_arr = np.copy(input_arr)
fin_arr = output_arr[np.isfinite(output_arr)]
time_axis = 5 * np.arange(len(output_arr))
fin_time = time_axis[np.isfinite(output_arr)]
# Return the unsmoothed array if fewer than 2 observations
if fin_arr.size < 2:
return output_arr
smoother = skks.NadarayaWatsonSmoother(smoothing_parameter=bandwidth)
fgrid = skgrid.FDataGrid([fin_arr], fin_time)
fd_smoothed = smoother.fit_transform(fgrid)
output_smoothed = fd_smoothed.data_matrix.flatten()
output_arr[np.isfinite(output_arr)] = output_smoothed
return output_arr
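# Usage sketch (hypothetical bandwidth value): since the time axis above is
# built in minutes, the smoothing bandwidth is expressed in minutes as well.
#
#   pao2_smoothed = kernel_smooth_arr(pao2_est_arr, bandwidth=20)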
def delete_short_vent_events(vent_status_arr, short_event_hours):
""" Delete short events in the ventilation status array"""
in_event = False
event_length = 0
for idx in range(len(vent_status_arr)):
cur_state = vent_status_arr[idx]
if in_event and cur_state == 1.0:
event_length += 5
if not in_event and cur_state == 1.0:
in_event = True
event_length = 5
event_start_idx = idx
if in_event and (cur_state == 0.0 or np.isnan(cur_state)):
in_event = False
if event_length / 60. < short_event_hours:
vent_status_arr[event_start_idx:idx] = 0.0
return vent_status_arr
def ellis(x_orig):
""" ELLIS model converting SpO2 in 100 % units into a PaO2 ABGA
estimate"""
x_orig[np.isnan(x_orig)] = 98 # Normal value assumption
x = x_orig / 100
x[x == 1] = 0.999
exp_base = (11700 / ((1 / x) - 1))
exp_sqrbracket = np.sqrt(pow(50, 3) + (exp_base ** 2))
exp_first = np.cbrt(exp_base + exp_sqrbracket)
exp_second = np.cbrt(exp_base - exp_sqrbracket)
exp_full = exp_first + exp_second
return exp_full
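# Rough sanity check of the inversion (approximate values): an SpO2 of 90 %
# maps to a PaO2 estimate of about 59 mmHg, and the 98 % normal-value
# assumption to roughly 104 mmHg.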
def correct_left_edge_vent(vent_status_arr, etco2_meas_cnt, etco2_col):
""" Corrects the left edge of the ventilation status array, to pin-point the exact conditions"""
on_left_edge = False
in_event = False
# Correct left ventilation edges of the ventilation zone
for idx in range(len(vent_status_arr)):
if vent_status_arr[idx] == 1.0 and not in_event:
in_event = True
on_left_edge = True
if on_left_edge and in_event:
if vent_status_arr[idx] == 0.0:
in_event = False
on_left_edge = False
elif (idx == 0 and etco2_meas_cnt[idx] > 0 or etco2_meas_cnt[idx] - etco2_meas_cnt[idx - 1] >= 1) and \
etco2_col[idx] > 0.5:
on_left_edge = False
else:
vent_status_arr[idx] = 0.0
return vent_status_arr
def delete_small_continuous_blocks(event_arr, block_threshold=None):
""" Given an event array, deletes small contiguous blocks that are sandwiched between two other blocks, one of which
is longer, they both have the same label. For the moment we delete blocks smaller than 30 minutes. Note this
requires only a linear pass over the array"""
block_list = []
active_block = None
# Build a block list
for jdx in range(event_arr.size):
new_block = event_arr[jdx]
# Start a new block at the beginning
if active_block is None:
active_block = new_block
left_block_idx = jdx
# Change to a new block
elif not active_block == new_block:
block_list.append((active_block, left_block_idx, jdx - 1))
left_block_idx = jdx
active_block = new_block
        # Save the last block unconditionally
if jdx == event_arr.size - 1:
block_list.append((new_block, left_block_idx, jdx))
# Merge blocks
while True:
all_clean = True
for bidx, block in enumerate(block_list):
block_label, lidx, ridx = block
block_len = ridx - lidx + 1
# Candidate for merging
if block_len <= block_threshold:
if len(block_list) == 1:
all_clean = True
break
# Only right block
elif bidx == 0:
next_block = block_list[bidx + 1]
nb_label, nb_lidx, nb_ridx = next_block
nb_len = nb_ridx - nb_lidx + 1
# Merge blocks
if nb_len > block_len and nb_len > block_threshold:
block_list[bidx] = (nb_label, lidx, nb_ridx)
block_list.remove(next_block)
all_clean = False
break
# Only left block
elif bidx == len(block_list) - 1:
prev_block = block_list[bidx - 1]
pb_label, pb_lidx, pb_ridx = prev_block
pb_len = pb_ridx - pb_lidx + 1
if pb_len > block_len and pb_len > block_threshold:
block_list[bidx] = (pb_label, pb_lidx, ridx)
block_list.remove(prev_block)
all_clean = False
break
# Interior block
else:
prev_block = block_list[bidx - 1]
next_block = block_list[bidx + 1]
pb_label, pb_lidx, pb_ridx = prev_block
nb_label, nb_lidx, nb_ridx = next_block
pb_len = pb_ridx - pb_lidx + 1
nb_len = nb_ridx - nb_lidx + 1
if pb_label == nb_label and (pb_len > block_threshold or nb_len > block_threshold):
block_list[bidx] = (pb_label, pb_lidx, nb_ridx)
block_list.remove(prev_block)
block_list.remove(next_block)
all_clean = False
break
# Traversed block list with no actions required
if all_clean:
break
# Now back-translate the block list to the list
out_arr = np.copy(event_arr)
for blabel, lidx, ridx in block_list:
out_arr[lidx:ridx + 1] = blabel
# Additionally build an array where the two arrays are different
    diff_arr = (out_arr != event_arr).astype(bool)
return (out_arr, diff_arr)
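# Example (hypothetical): with block_threshold=6 (30 minutes on the 5-minute
# grid), the label sequence A*7, B*2, A*7 collapses to A*16, because the short
# B block sits between two same-labelled blocks of which at least one is long
# enough; diff_arr flags the two rewritten positions.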
def collect_regression_data(spo2_col, spo2_meas_cnt, pao2_col, pao2_meas_cnt, fio2_est_arr,
sao2_col, sao2_meas_cnt, ph_col, ph_meas_cnt):
""" Collect regression data at time-stamps where we have a real PaO2 measurement, return
partial training X,y pairs for this patient"""
X_arr_collect = []
y_arr_collect = []
aux_collect = []
cur_pao2_cnt = 0
cur_spo2_cnt = 0
cur_sao2_cnt = 0
cur_ph_cnt = 0
pao2_real_meas = []
spo2_real_meas = []
sao2_real_meas = []
ph_real_meas = []
for jdx in range(spo2_col.size):
if spo2_meas_cnt[jdx] > cur_spo2_cnt:
spo2_real_meas.append(jdx)
cur_spo2_cnt = spo2_meas_cnt[jdx]
if sao2_meas_cnt[jdx] > cur_sao2_cnt:
sao2_real_meas.append(jdx)
cur_sao2_cnt = sao2_meas_cnt[jdx]
if ph_meas_cnt[jdx] > cur_ph_cnt:
ph_real_meas.append(jdx)
cur_ph_cnt = ph_meas_cnt[jdx]
if pao2_meas_cnt[jdx] > cur_pao2_cnt:
pao2_real_meas.append(jdx)
cur_pao2_cnt = pao2_meas_cnt[jdx]
# Only start fitting the model from the 2nd measurement onwards
if len(pao2_real_meas) >= 2 and len(spo2_real_meas) >= 2 and len(sao2_real_meas) >= 2 and len(
ph_real_meas) >= 2:
# Dimensions of features
# 0: Last real SpO2 measurement
# 1: Last real PaO2 measurement
# 2: Last real SaO2 measurement
# 3: Last real pH measurement
# 4: Time to last real SpO2 measurement
# 5: Time to last real PaO2 measurement
# 6: Closest SpO2 to last real PaO2 measurement
x_vect = np.array([spo2_col[jdx - 1], pao2_col[jdx - 1], sao2_col[jdx - 1], ph_col[jdx - 1],
jdx - spo2_real_meas[-2], jdx - pao2_real_meas[-2], spo2_col[pao2_real_meas[-2]]])
y_val = pao2_col[jdx]
aux_val = fio2_est_arr[jdx]
if np.isnan(x_vect).sum() == 0 and np.isfinite(y_val) and np.isfinite(aux_val):
X_arr_collect.append(x_vect)
y_arr_collect.append(y_val)
aux_collect.append(aux_val)
if len(X_arr_collect) > 0:
X_arr = np.vstack(X_arr_collect)
y_arr = np.array(y_arr_collect)
aux_arr = np.array(aux_collect)
assert (np.isnan(X_arr).sum() == 0 and np.isnan(y_arr).sum() == 0)
return (X_arr, y_arr, aux_arr)
else:
return (None, None, None)
def delete_low_density_hr_gap(vent_status_arr, hr_status_arr, configs=None):
""" Deletes gaps in ventilation which are caused by likely sensor dis-connections"""
in_event = False
in_gap = False
gap_idx = -1
for idx in range(len(vent_status_arr)):
# Beginning of new event, not from inside gap
if not in_event and not in_gap and vent_status_arr[idx] == 1.0:
in_event = True
# Beginning of potential gap that needs to be closed
elif in_event and vent_status_arr[idx] == 0.0:
in_gap = True
gap_idx = idx
in_event = False
# The gap is over, re-assign the status of ventilation to merge the gap, enter new event
if in_gap and vent_status_arr[idx] == 1.0:
hr_sub_arr = hr_status_arr[gap_idx:idx]
# Close the gap if the density of HR is too low in between
if np.sum(hr_sub_arr) / hr_sub_arr.size <= configs["vent_hr_density_threshold"]:
vent_status_arr[gap_idx:idx] = 1.0
in_gap = False
in_event = True
return vent_status_arr
def suppox_to_fio2(suppox_val):
""" Conversion of supplemental oxygen to FiO2 estimated value"""
if suppox_val > 15:
return 75
else:
return SUPPOX_TO_FIO2[suppox_val]
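# Example lookups: suppox_to_fio2(0) -> 21 (ambient air), suppox_to_fio2(4) -> 45,
# and any supplemental oxygen value above 15 is capped at an FiO2 estimate of 75.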
def conservative_state(state1, state2):
""" Given two states, return the lower one """
if state1 == state2:
return state1
for skey in ["event_0", "event_1", "event_2"]:
if state1 == skey or state2 == skey:
return skey
return "event_3"
def endpoint_gen_benchmark(configs):
var_map = configs["VAR_IDS"]
raw_var_map = configs["RAW_VAR_IDS"]
sz_window = configs["length_fw_window"]
abga_window = configs["length_ABGA_window"]
missing_unm = 0
# Threshold statistics
stat_counts_ready_and_failure = 0
stat_counts_ready_and_success = 0
stat_counts_nready_and_failure = 0
stat_counts_nready_and_success = 0
stat_counts_ready_nextube = 0
stat_counts_nready_nextube = 0
imputed_f = configs["imputed_path"]
merged_f = os.path.join(configs["merged_h5"])
out_folder = os.path.join(configs["endpoint_path"])
if not os.path.exists(out_folder):
os.mkdir(out_folder)
batch_id = configs["batch_idx"]
logging.info("Generating endpoints for batch {}".format(batch_id))
batch_fpath = os.path.join(imputed_f, "batch_{}.parquet".format(batch_id))
if not os.path.exists(batch_fpath):
logging.info("WARNING: Input file does not exist, exiting...")
sys.exit(1)
df_batch = pd.read_parquet(os.path.join(imputed_f, "batch_{}.parquet".format(batch_id)))
logging.info("Loaded imputed data done...")
cand_raw_batch = glob.glob(os.path.join(merged_f, "part-{}.parquet".format(batch_id)))
assert (len(cand_raw_batch) == 1)
pids = list(df_batch.patientid.unique())
logging.info("Number of patients in batch: {}".format(len(df_batch.patientid.unique())))
first_write = True
out_fp = os.path.join(out_folder, "batch_{}.parquet".format(batch_id))
event_count = {"FIO2_AVAILABLE": 0, "SUPPOX_NO_MEAS_12_HOURS_LIMIT": 0, "SUPPOX_MAIN_VAR": 0, "SUPPOX_HIGH_FLOW": 0,
"SUPPOX_NO_FILL_STOP": 0}
readiness_ext_count = 0
not_ready_ext_count = 0
readiness_and_extubated_cnt = 0
extubated_cnt = 0
df_static = pd.read_parquet(configs["general_data_table_path"])
X_reg_collect = []
y_reg_collect = []
aux_reg_collect = []
out_dfs = []
for pidx, pid in enumerate(pids):
df_pid = df_batch[df_batch["patientid"] == pid]
if df_pid.shape[0] == 0:
logging.info("WARNING: No input data for PID: {}".format(pid))
continue
df_merged_pid = pd.read_parquet(cand_raw_batch[0], filters=[("patientid", "=", pid)])
df_merged_pid.sort_values(by="datetime", inplace=True)
suppox_val = {}
suppox_ts = {}
# Main route of SuppOx
df_suppox_red_async = df_merged_pid[[var_map["SuppOx"], "datetime"]]
        # Keep only rows where both the SuppOx value and its timestamp are present
        df_suppox_red_async = df_suppox_red_async.dropna(thresh=2)
suppox_async_red_ts = np.array(df_suppox_red_async["datetime"])
suppox_val["SUPPOX"] = np.array(df_suppox_red_async[var_map["SuppOx"]])
# Strategy is to create an imputed SuppOx column based on the spec using
# forward filling heuristics
# Relevant meta-variables
fio2_col = np.array(df_pid[var_map["FiO2"]])
pao2_col = np.array(df_pid[var_map["PaO2"]])
etco2_col = np.array(df_pid[var_map["etCO2"]])
paco2_col = np.array(df_pid[var_map["PaCO2"]])
gcs_a_col = np.array(df_pid[var_map["GCS_Antwort"]])
gcs_m_col = np.array(df_pid[var_map["GCS_Motorik"]])
gcs_aug_col = np.array(df_pid[var_map["GCS_Augen"]])
weight_col = np.array(df_pid[var_map["Weight"][0]])
noreph_col = np.array(df_pid[var_map["Norephenephrine"][0]])
epineph_col = np.array(df_pid[var_map["Epinephrine"][0]])
vaso_col = np.array(df_pid[var_map["Vasopressin"][0]])
milri_col = np.array(df_pid[var_map["Milrinone"][0]])
dobut_col = np.array(df_pid[var_map["Dobutamine"][0]])
levosi_col = np.array(df_pid[var_map["Levosimendan"][0]])
theo_col = np.array(df_pid[var_map["Theophyllin"][0]])
lactate_col = np.array(df_pid[var_map["Lactate"][0]])
peep_col = np.array(df_pid[var_map["PEEP"]])
# Heartrate
hr_col = np.array(df_pid[var_map["HR"]])
hr_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["HR"])])
# Temperature
temp_col = np.array(df_pid[var_map["Temp"]])
temp_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["Temp"])])
rrate_col = np.array(df_pid[var_map["RRate"]])
tv_col = np.array(df_pid[var_map["TV"]])
map_col = np.array(df_pid[var_map["MAP"][0]])
airway_col = np.array(df_pid[var_map["Airway"]])
# Ventilator mode group columns
vent_mode_col = np.array(df_pid[var_map["vent_mode"]])
spo2_col = np.array(df_pid[var_map["SpO2"]])
if configs["presmooth_spo2"]:
spo2_col = percentile_smooth(spo2_col, configs["spo2_smooth_percentile"],
configs["spo2_smooth_window_size_mins"])
sao2_col = np.array(df_pid[var_map["SaO2"]])
ph_col = np.array(df_pid[var_map["pH"]])
fio2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["FiO2"])])
pao2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["PaO2"])])
etco2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["etCO2"])])
peep_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["PEEP"])])
hr_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["HR"])])
spo2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["SpO2"])])
sao2_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["SaO2"])])
ph_meas_cnt = np.array(df_pid["{}_IMPUTED_STATUS_CUM_COUNT".format(var_map["pH"])])
abs_dtime_arr = np.array(df_pid["datetime"])
event_status_arr = np.zeros(shape=(fio2_col.size), dtype="<S10")
# Status arrays
pao2_avail_arr = np.zeros(shape=(fio2_col.size))
fio2_avail_arr = np.zeros(shape=(fio2_col.size))
fio2_suppox_arr = np.zeros(shape=(fio2_col.size))
fio2_ambient_arr = np.zeros(shape=(fio2_col.size))
pao2_sao2_model_arr = np.zeros(shape=(fio2_col.size))
pao2_full_model_arr = np.zeros(shape=(fio2_col.size))
ratio_arr = np.zeros(shape=(fio2_col.size))
sur_ratio_arr = np.zeros(shape=(fio2_col.size))
pao2_est_arr = np.zeros(shape=(fio2_col.size))
fio2_est_arr = np.zeros(shape=(fio2_col.size))
vent_status_arr = np.zeros(shape=(fio2_col.size))
readiness_ext_arr = np.zeros(shape=(fio2_col.size))
readiness_ext_arr[:] = np.nan
# Votes arrays
vent_votes_arr = np.zeros(shape=(fio2_col.size))
vent_votes_etco2_arr = np.zeros(shape=(fio2_col.size))
vent_votes_ventgroup_arr = np.zeros(shape=(fio2_col.size))
vent_votes_tv_arr = np.zeros(shape=(fio2_col.size))
vent_votes_airway_arr = np.zeros(shape=(fio2_col.size))
peep_status_arr = np.zeros(shape=(fio2_col.size))
peep_threshold_arr = np.zeros(shape=(fio2_col.size))
hr_status_arr = np.zeros(shape=(fio2_col.size))
etco2_status_arr = np.zeros(shape=(fio2_col.size))
event_status_arr.fill("UNKNOWN")
# Array pointers tracking the current active value of each type
suppox_async_red_ptr = -1
# ======================== VENTILATION ================================================================================================
# Label each point in the 30 minute window with ventilation
in_vent_event = False
for jdx in range(0, len(ratio_arr)):
low_vent_idx = max(0, jdx - configs["peep_search_bw"])
high_vent_idx = min(len(ratio_arr), jdx + configs["peep_search_bw"])
low_peep_idx = max(0, jdx - configs["peep_search_bw"])
high_peep_idx = min(len(ratio_arr), jdx + configs["peep_search_bw"])
low_hr_idx = max(0, jdx - configs["hr_vent_search_bw"])
high_hr_idx = min(len(ratio_arr), jdx + configs["hr_vent_search_bw"])
win_etco2 = etco2_col[low_vent_idx:high_vent_idx]
win_etco2_meas = etco2_meas_cnt[low_vent_idx:high_vent_idx]
win_peep = peep_col[low_peep_idx:high_peep_idx]
win_peep_meas = peep_meas_cnt[low_peep_idx:high_peep_idx]
win_hr_meas = hr_meas_cnt[low_hr_idx:high_hr_idx]
etco2_meas_win = win_etco2_meas[-1] - win_etco2_meas[0] > 0
peep_meas_win = win_peep_meas[-1] - win_peep_meas[0] > 0
hr_meas_win = win_hr_meas[-1] - win_hr_meas[0] > 0
current_vent_group = vent_mode_col[jdx]
current_tv = tv_col[jdx]
current_airway = airway_col[jdx]
vote_score = 0
# EtCO2 requirement (still needed)
if etco2_meas_win and (win_etco2 > 0.5).any():
vote_score += 2
vent_votes_etco2_arr[jdx] = 2
# Ventilation group requirement (still needed)
if current_vent_group in [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0]:
vote_score += 1
vent_votes_ventgroup_arr[jdx] += 1
elif current_vent_group in [1.0]:
vote_score -= 1
vent_votes_ventgroup_arr[jdx] -= 1
elif current_vent_group in [11.0, 12.0, 13.0, 15.0, 17.0]:
vote_score -= 2
vent_votes_ventgroup_arr[jdx] -= 2
# TV presence requirement (still needed)
if current_tv > 0:
vote_score += 1
vent_votes_tv_arr[jdx] = 1
# Airway requirement (still needed)
if current_airway in [1, 2]:
vote_score += 2
vent_votes_airway_arr[jdx] = 2
# No airway (still needed)
if current_airway in [3, 4, 5, 6]:
vote_score -= 1
vent_votes_airway_arr[jdx] = -1
vent_votes_arr[jdx] = vote_score
if vote_score >= configs["vent_vote_threshold"]:
in_vent_event = True
vent_status_arr[jdx] = 1
else:
in_vent_event = False
if peep_meas_win:
peep_status_arr[jdx] = 1
if (win_peep >= configs["peep_threshold"]).any():
peep_threshold_arr[jdx] = 1
if etco2_meas_win:
etco2_status_arr[jdx] = 1
if hr_meas_win:
hr_status_arr[jdx] = 1
if configs["detect_hr_gaps"]:
vent_status_arr = delete_low_density_hr_gap(vent_status_arr, hr_status_arr, configs=configs)
if configs["merge_short_vent_gaps"]:
vent_status_arr = merge_short_vent_gaps(vent_status_arr, configs["short_gap_hours"])
if configs["delete_short_vent_events"]:
vent_status_arr = delete_short_vent_events(vent_status_arr, configs["short_event_hours"])
# vent_status_arr=correct_left_edge_vent(vent_status_arr, etco2_meas_cnt, etco2_col)
# vent_status_arr=correct_right_edge_vent(vent_status_arr, etco2_meas_cnt, etco2_col)
# Ventilation period array
vent_period_arr = np.copy(vent_status_arr)
# Delete short ventilation periods if no HR gap before
in_event = False
event_length = 0
for idx in range(len(vent_period_arr)):
cur_state = vent_period_arr[idx]
if in_event and cur_state == 1.0:
event_length += 5
if not in_event and cur_state == 1.0:
in_event = True
event_length = 5
event_start_idx = idx
if in_event and (np.isnan(cur_state) or cur_state == 0.0):
in_event = False
# Short event at beginning of stay shall never be deleted...
if event_start_idx == 0:
delete_event = False
else:
search_hr_idx = event_start_idx - 1
while search_hr_idx >= 0:
if hr_status_arr[search_hr_idx] == 1.0:
hr_gap_length = 5 * (event_start_idx - search_hr_idx)
delete_event = True
break
search_hr_idx -= 1
# Found no HR before event, do not delete event...
if search_hr_idx == -1:
delete_event = False
# Delete event in principle, then check if short enough...
if delete_event:
event_length += hr_gap_length
if event_length / 60. <= configs["short_event_hours_vent_period"]:
vent_period_arr[event_start_idx:idx] = 0.0
# ============================== OXYGENATION ENDPOINTS ==================================================================
# Label each point in the 30 minute window (except ventilation)
for jdx in range(0, len(ratio_arr)):
            # Advance to the last SuppOx info before the grid point
cur_time = abs_dtime_arr[jdx]
while True:
suppox_async_red_ptr = suppox_async_red_ptr + 1
if suppox_async_red_ptr >= len(suppox_async_red_ts) or suppox_async_red_ts[
suppox_async_red_ptr] > cur_time:
suppox_async_red_ptr = suppox_async_red_ptr - 1
break
# Estimate the current FiO2 value
bw_fio2 = fio2_col[max(0, jdx - configs["sz_fio2_window"]):jdx + 1]
bw_fio2_meas = fio2_meas_cnt[max(0, jdx - configs["sz_fio2_window"]):jdx + 1]
bw_etco2_meas = etco2_meas_cnt[max(0, jdx - configs["sz_etco2_window"]):jdx + 1]
fio2_meas = bw_fio2_meas[-1] - bw_fio2_meas[0] > 0
etco2_meas = bw_etco2_meas[-1] - bw_etco2_meas[0] > 0
mode_group_est = vent_mode_col[jdx]
# FiO2 is measured since beginning of stay and EtCO2 was measured, we use FiO2 (indefinite forward filling)
# if ventilation is active or the current estimate of ventilation mode group is NIV.
if fio2_meas and (vent_status_arr[jdx] == 1.0 or mode_group_est == 4.0):
event_count["FIO2_AVAILABLE"] += 1
fio2_val = bw_fio2[-1] / 100
fio2_avail_arr[jdx] = 1
# Use supplemental oxygen or ambient air oxygen
else:
# No real measurements up to now, or the last real measurement
                # was more than the configured forward-fill limit (configs["suppox_max_ffill"] hours) away.
if suppox_async_red_ptr == -1 or (
cur_time - suppox_async_red_ts[suppox_async_red_ptr]) > np.timedelta64(
configs["suppox_max_ffill"], 'h'):
event_count["SUPPOX_NO_MEAS_12_HOURS_LIMIT"] += 1
fio2_val = configs["ambient_fio2"]
fio2_ambient_arr[jdx] = 1
# Find the most recent source variable of SuppOx
else:
suppox = suppox_val["SUPPOX"][suppox_async_red_ptr]
# SuppOx information from main source
if np.isfinite(suppox):
event_count["SUPPOX_MAIN_VAR"] += 1
fio2_val = suppox_to_fio2(int(suppox)) / 100
fio2_suppox_arr[jdx] = 1
else:
assert (False, "Impossible condition")
bw_pao2_meas = pao2_meas_cnt[max(0, jdx - configs["sz_pao2_window"]):jdx + 1]
bw_pao2 = pao2_col[max(0, jdx - configs["sz_pao2_window"]):jdx + 1]
pao2_meas = bw_pao2_meas[-1] - bw_pao2_meas[0] >= 1
# PaO2 was just measured, just use the value
if pao2_meas:
pao2_estimate = bw_pao2[-1]
pao2_avail_arr[jdx] = 1
# Have to forecast PaO2 from a previous SpO2
else:
bw_spo2 = spo2_col[max(0, jdx - abga_window):jdx + 1]
bw_spo2_meas = spo2_meas_cnt[max(0, jdx - abga_window):jdx + 1]
spo2_meas = bw_spo2_meas[-1] - bw_spo2_meas[0] >= 1
# Standard case, take the last SpO2 measurement
if spo2_meas:
spo2_val = bw_spo2[-1]
pao2_estimate = ellis(np.array([spo2_val]))[0]
                # Extreme edge case, there was no SpO2 measurement in the last 24 hours
else:
spo2_val = 98
pao2_estimate = ellis(np.array([spo2_val]))[0]
# Compute the individual components of the Horowitz index
pao2_est_arr[jdx] = pao2_estimate
fio2_est_arr[jdx] = fio2_val
pao2_est_arr_orig = np.copy(pao2_est_arr)
# Smooth individual components of the P/F ratio estimate
if configs["kernel_smooth_estimate_pao2"]:
pao2_est_arr = kernel_smooth_arr(pao2_est_arr, bandwidth=configs["smoothing_bandwidth"])
if configs["kernel_smooth_estimate_fio2"]:
fio2_est_arr = kernel_smooth_arr(fio2_est_arr, bandwidth=configs["smoothing_bandwidth"])
# Test2 data-set for surrogate model
pao2_sur_est = np.copy(pao2_est_arr)
assert (np.sum(np.isnan(pao2_sur_est)) == 0)
# Convex combination of the estimate
if configs["mix_real_estimated_pao2"]:
pao2_est_arr = mix_real_est_pao2(pao2_col, pao2_meas_cnt, pao2_est_arr,
bandwidth=configs["smoothing_bandwidth"])
# Compute Horowitz indices (Kernel pipeline / Surrogate model pipeline)
for jdx in range(len(ratio_arr)):
ratio_arr[jdx] = pao2_est_arr[jdx] / fio2_est_arr[jdx]
# Post-smooth Horowitz index
if configs["post_smooth_pf_ratio"]:
ratio_arr = kernel_smooth_arr(ratio_arr, bandwidth=configs["post_smoothing_bandwidth"])
if configs["pao2_version"] == "ellis_basic":
pf_event_est_arr = np.copy(ratio_arr)
elif configs["pao2_version"] == "original":
assert (False)
# Now label based on the array of estimated Horowitz indices
for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
est_idx = pf_event_est_arr[idx:min(len(ratio_arr), idx + sz_window)]
est_vent = vent_status_arr[idx:min(len(ratio_arr), idx + sz_window)]
est_peep_dense = peep_status_arr[idx:min(len(ratio_arr), idx + sz_window)]
est_peep_threshold = peep_threshold_arr[idx:min(len(ratio_arr), idx + sz_window)]
if np.sum((est_idx <= 100) & (
(est_vent == 0.0) | (est_vent == 1.0) & (est_peep_dense == 0.0) | (est_vent == 1.0) & (
est_peep_dense == 1.0) & (est_peep_threshold == 1.0))) >= 2 / 3 * len(est_idx):
event_status_arr[idx] = "event_3"
elif np.sum((est_idx <= 200) & (
(est_vent == 0.0) | (est_vent == 1.0) & (est_peep_dense == 0.0) | (est_vent == 1.0) & (
est_peep_dense == 1.0) & (est_peep_threshold == 1.0))) >= 2 / 3 * len(est_idx):
event_status_arr[idx] = "event_2"
elif np.sum((est_idx <= 300) & (
(est_vent == 0.0) | (est_vent == 1.0) & (est_peep_dense == 0.0) | (est_vent == 1.0) & (
est_peep_dense == 1.0) & (est_peep_threshold == 1.0))) >= 2 / 3 * len(est_idx):
event_status_arr[idx] = "event_1"
elif np.sum(np.isnan(est_idx)) < 2 / 3 * len(est_idx):
event_status_arr[idx] = "event_0"
# Re-traverse the array and correct the right edges of events
        # Correct right edges of event 0 (correct to level 0)
on_right_edge = False
in_event = False
for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
cur_state = event_status_arr[idx].decode()
if cur_state in ["event_0"] and not in_event:
in_event = True
elif in_event and cur_state not in ["event_0"]:
in_event = False
on_right_edge = True
if on_right_edge:
if pf_event_est_arr[idx] < 300:
on_right_edge = False
else:
event_status_arr[idx] = "event_0"
# Correct right edges of event 1 (correct to level 1)
on_right_edge = False
in_event = False
for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
cur_state = event_status_arr[idx].decode()
if cur_state in ["event_1"] and not in_event:
in_event = True
elif in_event and cur_state not in ["event_1"]:
in_event = False
on_right_edge = True
if on_right_edge:
if pf_event_est_arr[idx] < 200 or pf_event_est_arr[idx] >= 300:
on_right_edge = False
else:
event_status_arr[idx] = "event_1"
# Correct right edges of event 2 (correct to level 2)
on_right_edge = False
in_event = False
for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
cur_state = event_status_arr[idx].decode()
if cur_state in ["event_2"] and not in_event:
in_event = True
elif in_event and cur_state not in ["event_2"]:
in_event = False
on_right_edge = True
if on_right_edge:
if pf_event_est_arr[idx] < 100 or pf_event_est_arr[idx] >= 200:
on_right_edge = False
else:
event_status_arr[idx] = "event_2"
# Correct right edges of event 3 (correct to level 3)
on_right_edge = False
in_event = False
for idx in range(0, len(event_status_arr) - configs["offset_back_windows"]):
cur_state = event_status_arr[idx].decode()
if cur_state in ["event_3"] and not in_event:
in_event = True
elif in_event and cur_state not in ["event_3"]:
in_event = False
on_right_edge = True
if on_right_edge:
if pf_event_est_arr[idx] >= 100:
on_right_edge = False
else:
event_status_arr[idx] = "event_3"
circ_status_arr = np.zeros_like(map_col)
        # Computation of a simplified (toy) version of the circulatory failure endpoint
for jdx in range(0, len(event_status_arr)):
map_subarr = map_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
lact_subarr = lactate_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
milri_subarr = milri_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
dobut_subarr = dobut_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
levosi_subarr = levosi_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
theo_subarr = theo_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
noreph_subarr = noreph_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
epineph_subarr = epineph_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
vaso_subarr = vaso_col[max(0, jdx - 12):min(jdx + 12, len(event_status_arr))]
map_crit_arr = ((map_subarr < 65) | (milri_subarr > 0) | (dobut_subarr > 0) | (levosi_subarr > 0) | (
theo_subarr > 0) | (noreph_subarr > 0) | \
(epineph_subarr > 0) | (vaso_subarr > 0))
lact_crit_arr = (lact_subarr > 2)
if np.sum(map_crit_arr) >= 2 / 3 * len(map_crit_arr) and np.sum(lact_crit_arr) >= 2 / 3 * len(map_crit_arr):
circ_status_arr[jdx] = 1.0
        # Traverse the array and delete short gaps between events
event_status_arr, relabel_arr = delete_small_continuous_blocks(event_status_arr,
block_threshold=configs[
"pf_event_merge_threshold"])
time_col = np.array(df_pid["datetime"])
rel_time_col = np.array(df_pid["rel_datetime"])
pid_col = np.array(df_pid["patientid"])
df_out_dict = {}
df_out_dict["datetime"] = time_col
df_out_dict["rel_datetime"] = rel_time_col
df_out_dict["patientid"] = pid_col
status_list = list(map(lambda raw_str: raw_str.decode("unicode_escape"), event_status_arr.tolist()))
df_out_dict["resp_failure_status"] = status_list
df_out_dict["resp_failure_status_relabel"] = relabel_arr
# Status columns
df_out_dict["fio2_available"] = fio2_avail_arr
df_out_dict["fio2_suppox"] = fio2_suppox_arr
df_out_dict["fio2_ambient"] = fio2_ambient_arr
df_out_dict["fio2_estimated"] = fio2_est_arr
df_out_dict["pao2_estimated"] = pao2_est_arr
df_out_dict["pao2_estimated_sur"] = pao2_sur_est
df_out_dict["pao2_available"] = pao2_avail_arr
df_out_dict["pao2_sao2_model"] = pao2_sao2_model_arr
df_out_dict["pao2_full_model"] = pao2_full_model_arr
df_out_dict["estimated_ratio"] = ratio_arr
df_out_dict["estimated_ratio_sur"] = sur_ratio_arr
df_out_dict["vent_state"] = vent_status_arr
df_out_dict["vent_period"] = vent_period_arr
# Ventilation voting base columns
df_out_dict["vent_votes"] = vent_votes_arr
df_out_dict["vent_votes_etco2"] = vent_votes_etco2_arr
df_out_dict["vent_votes_ventgroup"] = vent_votes_ventgroup_arr
df_out_dict["vent_votes_tv"] = vent_votes_tv_arr
df_out_dict["vent_votes_airway"] = vent_votes_airway_arr
# Circulatory failure related
df_out_dict["circ_failure_status"] = circ_status_arr
df_out = pd.DataFrame(df_out_dict)
out_dfs.append(df_out)
all_df = | pd.concat(out_dfs, axis=0) | pandas.concat |
import streamlit as st
import pandas as pd
import altair as alt
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from sklearn.metrics import confusion_matrix
################## CSS Stuff ##################
# load in css file (from: https://discuss.streamlit.io/t/colored-boxes-around-sections-of-a-sentence/3201/2)
def local_css(file_name):
with open(file_name) as f:
st.markdown('<style>{}</style>'.format(f.read()), unsafe_allow_html=True)
local_css("style.css")
################## Intro ##################
st.title("Analyzing Credit Card Defaults")
st.markdown("<p class='subtitle'>By <NAME> & <NAME></p>", unsafe_allow_html=True)
st.markdown("<h2>I. What is a Default?</h2>", unsafe_allow_html=True)
defaultDescription ="""
<p class='important-container'>
A <b>default</b> is a failure to make a payment on a credit card bill
        by the due date. The usual consequences of a default are an increase in
        the interest rate on the account or a decrease in the line of credit.
</p>
<p>
Our Data: <a href='https://www.kaggle.com/mishra5001/credit-card?select=application_data.csv&fbclid=IwAR1BFzFdio_1DgfBYb_tc7uf6sCKYB4Ajz3aqUeqrEmkn41-J0hpX5HWFNk'>Source</a>
</p>
"""
st.markdown(defaultDescription, unsafe_allow_html=True)
@st.cache
def load_data(url):
return | pd.read_csv(url) | pandas.read_csv |
import os
import pandas
from mlopenapp.pipelines import vectorization as vct,\
text_preprocessing as tpp, \
logistic_regression as lr, \
metrics as mtr
from mlopenapp.utils import io_handler as io
from mlopenapp.utils import plotter, data_handler
# These will be replaced by user input
train_paths = [
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir,
'data/user_data/train/pos/'),
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir,
'data/user_data/train/neg/')
]
train_sentiments = [1, 0]
test_paths = [
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir,
'data/user_data/test/pos/'),
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir,
'data/user_data/test/neg/')
]
test_sentiments = [1, 0]
def get_params(run=True):
if not run:
params = {"train - pos samples": ("upload"), "train - neg samples": ("upload"),
"test - pos samples": ("upload"), "test - neg samples": ("upload"), }
return params
else:
return ""
def train(inpt, params=None):
"""
    Creates a model from the given train and test dataframes, and calculates model metrics.
"""
print("Preparing Data. . .")
df_test = pandas.DataFrame()
df_train = | pandas.DataFrame() | pandas.DataFrame |
"""Tests for cell_img.common.df_style."""
from absl.testing import absltest
from cell_img.common import df_style
import numpy as np
import numpy.testing as npt
import pandas as pd
class DfStyleTest(absltest.TestCase):
def testColorizerWithInts(self):
values = [1, 2, 3, 4]
c = df_style.make_colorizer_from_series(pd.Series(values))
for v in values:
self.assertTrue(c(v).startswith('background-color: #'))
def testColorizerWithStrings(self):
values = ['a', 'b', 'cc', 'd']
c = df_style.make_colorizer_from_series(pd.Series(values))
for v in values:
self.assertTrue(c(v).startswith('background-color: #'))
def testColorizerWithMixedTypes(self):
values = [1, 2.0, 'cc', 'd']
c = df_style.make_colorizer_from_series(pd.Series(values))
for v in values:
self.assertTrue(c(v).startswith('background-color: #'))
def testColorizerWithNan(self):
values = [np.nan]
c = df_style.make_colorizer_from_series(pd.Series(values))
self.assertEqual(c(float(np.nan)), 'background-color: #cccccc')
def testColorizerWithWrongValue(self):
values = [0, 1, 2, 3]
c = df_style.make_colorizer_from_series( | pd.Series(values) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
__author__ = "<NAME>"
"""
Dash server for visualizing the decision boundary of a DenseNet (or, with adaptation, a general CNN) classifier.
Several parts regarding the DB handling are adapted from <NAME>, github.com/choosehappy
"""
# In[5]:
import re
import colorsys
import matplotlib.cm
import argparse
import flask
import umap
import tables
import numpy as np
import pandas as pd
from textwrap import dedent as d
from pathlib import Path
# import jupyterlab_dash
from sklearn.metrics import confusion_matrix
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision.models import DenseNet
from torch.utils.data.dataloader import default_collate
import albumentations as albmt
from albumentations.pytorch import ToTensor
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
parser = argparse.ArgumentParser(description='Run a server for visualization of a CNN classifier')
parser.add_argument('--load_from_file', '-l', action='store_true', default=False, help='Load the embedding from a csv file. Does not compute the embedding',)
parser.add_argument('--target_class', '-t', default=None, help='Target Label, if the classifier was trained in one vs all fashion',)
parser.add_argument('--port', '-p', help='Server Port', default=8050, type = int)
parser.add_argument('database', help='Database containing image patches, labels ...',)
parser.add_argument('filename', help='Creates a csv file of the embedding')
parser.add_argument('model', help='Saved torch model dict, and architecture')
arguments = parser.parse_args()
file_name = arguments.filename
use_existing = arguments.load_from_file
target_class = arguments.target_class
use_port = arguments.port
db_path = arguments.database
model_path = arguments.model
batch_size = 32
patch_size = 224
server = flask.Flask(__name__)
app = dash.Dash(__name__, server=server)
# Depending on how many colors are needed, take either the tab10 or the tab20 palette.
def color_pallete(n):
    num = int(min(np.ceil(n / 10), 2) * 10)
colors = matplotlib.cm.get_cmap(f'tab{num}').colors
if n > 20:
return ['#%02x%02x%02x' % tuple(
np.array(np.array(colorsys.hsv_to_rgb(i,0.613,246 ))*255,
dtype=np.uint8)) for i in np.linspace(0, 1, n+1)][:-1]
return ['#%02x%02x%02x' % tuple(np.array(np.array(i) * 255,dtype=np.uint8)) for i in colors]
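# Minimal usage sketch of the palette helper above (illustrative calls only; it
# assumes matplotlib's tab10/tab20 colormaps are available). For n <= 20 the full
# tab10 or tab20 colormap is returned as '#rrggbb' strings and callers index into
# it by class; for n > 20 exactly n evenly spaced HSV-derived hues are returned.
def _example_color_pallete_usage():
    few = color_pallete(5)      # tab10 -> 10 hex strings
    many = color_pallete(30)    # HSV fallback -> exactly 30 hex strings
    assert len(few) >= 5 and len(many) == 30
    return few, many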
class Dataset(object):
"Dabase handler for torch.utils.DataLoader written by <NAME>"
def __init__(self, fname, img_transform=None):
self.fname = fname
self.img_transform = img_transform
with tables.open_file(self.fname, 'r') as db:
self.nitems = db.root.imgs.shape[0]
self.imgs = None
self.filenames = None
self.label = None
def __getitem__(self, index):
# opening should be done in __init__ but seems to be
# an issue with multithreading so doing here. need to do it everytime, otherwise hdf5 crashes
with tables.open_file(self.fname, 'r') as db:
self.imgs = db.root.imgs
self.filenames = db.root.filenames
self.label = db.root.labels
# get the requested image and mask from the pytable
img = self.imgs[index, :, :, :]
fname = self.filenames[index]
label = self.label[index]
img_new = img
if self.img_transform:
img_new = self.img_transform(image=img)['image']
return img_new, img, label, fname
def __len__(self):
return self.nitems
# In[7]:
def get_dataloader(batch_size, patch_size, db_path):
# +
def id_collate(batch):
new_batch = []
ids = []
for _batch in batch:
new_batch.append(_batch[:-1])
ids.append(_batch[-1])
return default_collate(new_batch), ids
# +
img_transform = albmt.Compose([
albmt.RandomSizedCrop((patch_size, patch_size), patch_size, patch_size),
ToTensor()
])
if db_path[0] != '/':
db_path = f'./{db_path}'
    # more workers do not seem to improve performance
dataset = Dataset(db_path, img_transform=img_transform)
dataLoader = DataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=0,
pin_memory=True, collate_fn=id_collate)
print(f"dataset size:\t{len(dataset)}")
# -
return dataLoader, dataset
def load_model(model_path):
device = torch.device('cuda')
checkpoint = torch.load(
model_path, map_location=lambda storage, loc: storage)
# load checkpoint to CPU and then put to device https://discuss.pytorch.org/t/saving-and-loading-torch-models-on-2-machines-with-different-number-of-gpu-devices/6666
model = DenseNet(growth_rate=checkpoint["growth_rate"],
block_config=checkpoint["block_config"],
num_init_features=checkpoint["num_init_features"],
bn_size=checkpoint["bn_size"],
drop_rate=checkpoint["drop_rate"],
num_classes=checkpoint["num_classes"]).to(device)
model.load_state_dict(checkpoint["model_dict"])
print(
f"total params: \t{sum([np.prod(p.size()) for p in model.parameters()])}")
model.eval()
return model, device
def load_embedding(dataLoader, model, device):
out = {}
def hook(module, input, output):
out[module] = input[0]
# works for torchvision.models.DenseNet, register_forward_hook on last layer before classifier.
model.classifier.register_forward_hook(hook)
# +
# all_preds=[]
all_last_layer = []
all_fnames = []
all_labels = []
all_predictions = []
# cmatrix = np.zeros((checkpoint['num_classes'], checkpoint['num_classes']))
    # add notification stuff? (X, xorig, label), fname = next(iter(dataLoader[phase]))
for (X, xorig, label), fname in dataLoader:
X = X.to(device)
label_pred = model(X)
last_layer = out[model.classifier].detach().cpu().numpy()
all_last_layer.append(last_layer)
# yflat = label.numpy() == target_class
all_labels.extend(label.numpy())
pred_class = np.argmax(label_pred.detach().cpu().numpy(), axis=1)
all_predictions.extend(pred_class)
all_fnames.extend([Path(fn.decode()).name for fn in fname])
# cmatrix = cmatrix + \
# confusion_matrix(yflat, pred_class, labels=range(
# checkpoint['num_classes']))
# print(cmatrix)
# acc = (cmatrix/cmatrix.sum()).trace()
# print(acc)
features_hists = np.vstack(all_last_layer)
# -
# +
reducer = umap.UMAP(n_neighbors=50, min_dist=0.0, n_components=3)
embedding = reducer.fit_transform(features_hists)
return embedding, all_labels, all_predictions, dataset, all_fnames
def create_confusion_map(embedding_a, target_class):
# n_classes = len(embedding_a.Prediction.unique())
pred = embedding_a.Prediction.values
label = embedding_a.Label.values
label = (label)
label = np.array(label, dtype=np.uint8)
conf = [f'{label[i]}{pred[i]}' for i in range(len(label))]
return embedding_a.assign(Confusion=conf)
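# Minimal illustration of the Confusion encoding produced above, on a tiny
# synthetic frame: each entry concatenates the true label and the predicted
# label, so '01' means a point with label 0 that was predicted as class 1.
# (The target_class argument is not used by create_confusion_map.)
def _example_create_confusion_map():
    toy = pd.DataFrame({'Label': [0, 1, 1], 'Prediction': [1, 1, 0]})
    out = create_confusion_map(toy, None)
    assert list(out.Confusion) == ['01', '11', '10']
    return out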
text_style = dict(color='#444', fontFamily='sans-serif', fontWeight=300)
dataLoader, dataset = get_dataloader(batch_size, patch_size, db_path)
if use_existing is True:
embedding_a = pd.read_csv(file_name)
else:
# model is not saved to variable to enable garbage collector to clean it after it is not used anymore
embedding, all_labels, all_predictions, dataset, fnames = load_embedding(
dataLoader, *load_model(model_path))
embedding_a = pd.DataFrame({"x": embedding[:, 0],
"y": embedding[:, 1],
"z": embedding[:, 2],
"Label": all_labels,
"Prediction": all_predictions,
"index": [*range(len(all_labels))],
"Slide": [i[:i.find(re.findall('[A-Za-z\.\s\_]*$', i)[0])] for i in fnames]})
embedding_a.to_csv(file_name)
embedding_a = create_confusion_map(embedding_a, target_class)
def plotly_figure(value, plot_type='2D'):
colors = color_pallete(len(embedding_a[value].unique()))
label_to_type = {'2D': 'scattergl', '3D': 'scatter3d'}
type_to_size = {'2D': 15, '3D': 2.5}
linesize = {'2D': 0.5, '3D': 0}
return {
'data': [dict(
x=embedding_a[embedding_a[value] == target]['x'],
y=embedding_a[embedding_a[value] == target]['y'],
z=embedding_a[embedding_a[value] == target]['z'],
text=embedding_a[embedding_a[value] == target]['index'],
index=embedding_a[embedding_a[value] == target]['index'],
customdata=embedding_a[embedding_a[value] == target]['index'],
mode='markers',
type=label_to_type[plot_type],
name=f'{target}',
marker={
'size': type_to_size[plot_type],
'opacity': 0.5,
'color': colors[i],
'line': {'width': linesize[plot_type], 'color': 'white'}
}
) for i, target in enumerate(sorted(embedding_a[value].unique()))],
'layout': dict(
xaxis={
'title': "x",
'type': 'linear'
},
yaxis={
'title': "y",
'type': 'linear'
},
margin={'l': 40, 'b': 30, 't': 10, 'r': 0},
height=750,
width=850,
hovermode='closest',
clickmode='event+select',
uirevision='no reset of zoom',
legend={'itemsizing': 'constant'}
)
}
app.layout = html.Div([
html.H2('CNN Classification Viewer', style=text_style),
# dcc.Input(id='predictor', placeholder='box', value=''),
html.Div([
html.Div([
dcc.RadioItems(
id='color-plot1',
options=[{'label': i, 'value': i}
for i in ['Label', 'Prediction', 'Confusion', 'Slide']],
value='Label',
labelStyle={}
),
dcc.RadioItems(
id='plot-type',
options=[{'label': i, 'value': i}
for i in ['2D', '3D']],
value='2D',)], style={'width': '49%', 'display': 'inline'}), # , 'float': 'left', 'display': 'inline-block'}),
html.Div([
dcc.Graph(id='plot1', figure=plotly_figure('Label'))
], style={'float': 'left', 'display': 'inline-block'}),
html.Div([
html.Div([html.Img(id='image', width=patch_size, height=patch_size)], style={'display': 'inline-block'}),
dcc.Markdown(d("""
**Image Properties**
""")),
html.Pre(id='hover-data'),
dcc.Markdown(d("""
**Frequency in selected**
""")),
html.Pre(id='selected-data')
], style={'float': 'left', 'display': 'inline-block'}, className='three columns'),
])], style={'width': '65%'})
@app.callback(
Output('selected-data', 'children'),
[Input('plot1', 'selectedData')])
def display_selected_data(selectedData):
text = ""
if selectedData is not None:
indices = | pd.DataFrame.from_dict(selectedData['points']) | pandas.DataFrame.from_dict |
import pandas as pd
from LS_STM import LSSTM
from data_makers import make_data_stm
from aiding_functions import load_obj
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt
raw_data = pd.read_csv('./finance_data/raw_data.csv')
raw_data['Date'] = | pd.to_datetime(raw_data['Date'], format='%Y-%m-%d') | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import itertools
import json
import operator
import os
from pathlib import Path
from pprint import pprint
import re
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from tqdm.notebook import tqdm
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png')
# ## Load data and preprocess
# ### Metadata
# In[ ]:
# Map from test suite tag to high-level circuit.
circuits = {
"Licensing": ["npi", "reflexive"],
"Long-Distance Dependencies": ["fgd", "cleft"],
"Agreement": ["number"],
"Garden-Path Effects": ["npz", "mvrr"],
"Gross Syntactic State": ["subordination"],
"Center Embedding": ["center"],
}
tag_to_circuit = {tag: circuit
for circuit, tags in circuits.items()
for tag in tags}
# In[ ]:
# Map codenames to readable names for various columns.
def format_pretrained(model_name):
return "%s$^*$" % model_name
PRETTY_COLUMN_MAPS = [
("model_name",
{
"vanilla": "LSTM",
"ordered-neurons": "ON-LSTM",
"rnng": "RNNG",
"ngram": "n-gram",
"random": "Random",
"gpt-2-pretrained": format_pretrained("GPT-2"),
"gpt-2-xl-pretrained": format_pretrained("GPT-2-XL"),
"gpt-2": "GPT-2",
"transformer-xl": format_pretrained("Transformer-XL"),
"grnn": format_pretrained("GRNN"),
"jrnn": format_pretrained("JRNN"),
}),
("corpus", lambda x: x.upper() if x else "N/A"),
]
PRETTY_COLUMNS = ["pretty_%s" % col for col, _ in PRETTY_COLUMN_MAPS]
# In[ ]:
# Exclusions
exclude_suite_re = re.compile(r"^fgd-embed[34]|^gardenpath|^nn-nv")
exclude_models = ["1gram", "ngram-no-rand"] # "ngram",
# In[ ]:
ngram_models = ["1gram", "ngram", "ngram-single"]
baseline_models = ["random"]
# Models for which we designed a controlled training regime
controlled_models = ["ngram", "ordered-neurons", "vanilla", "rnng", "gpt-2"]
controlled_nonbpe_models = ["ngram", "ordered-neurons", "vanilla", "rnng"]
# ### Load
# In[ ]:
ppl_data_path = Path("../data/raw/perplexity.csv")
test_suite_results_path = Path("../data/raw/sg_results")
# In[ ]:
perplexity_df = pd.read_csv(ppl_data_path, index_col=["model", "corpus", "seed"])
perplexity_df.index.set_names("model_name", level=0, inplace=True)
results_df = pd.concat([pd.read_csv(f) for f in test_suite_results_path.glob("*.csv")])
# Split model_id into constituent parts
model_ids = results_df.model.str.split("_", expand=True).rename(columns={0: "model_name", 1: "corpus", 2: "seed"})
results_df = pd.concat([results_df, model_ids], axis=1).drop(columns=["model"])
results_df["seed"] = results_df.seed.fillna("0").astype(int)
# Add tags
results_df["tag"] = results_df.suite.transform(lambda s: re.split(r"[-_0-9]", s)[0])
results_df["circuit"] = results_df.tag.map(tag_to_circuit)
tags_missing_circuit = set(results_df.tag.unique()) - set(tag_to_circuit.keys())
if tags_missing_circuit:
print("Tags missing circuit: ", ", ".join(tags_missing_circuit))
# In[ ]:
# Exclude test suites
exclude_filter = results_df.suite.str.contains(exclude_suite_re)
print("Dropping %i results / %i suites due to exclusions:"
% (exclude_filter.sum(), len(results_df[exclude_filter].suite.unique())))
print(" ".join(results_df[exclude_filter].suite.unique()))
results_df = results_df[~exclude_filter]
# Exclude models
exclude_filter = results_df.model_name.isin(exclude_models)
print("Dropping %i results due to dropping models:" % exclude_filter.sum(), list(results_df[exclude_filter].model_name.unique()))
results_df = results_df[~exclude_filter]
# Exclude word-level controlled models with BPE tokenization
exclude_filter = (results_df.model_name.isin(controlled_nonbpe_models)) & (results_df.corpus.str.endswith("bpe"))
results_df = results_df[~exclude_filter]
# Exclude GPT-2 with word-level or SentencePieceBPE tokenization
exclude_filter = ((results_df.model_name=="gpt-2") & ~(results_df.corpus.str.endswith("gptbpe")))
results_df = results_df[~exclude_filter]
# In[ ]:
# Average across seeds of each ngram model.
# The only differences between "seeds" of these model types are random differences in tie-breaking decisions.
for ngram_model in ngram_models:
    # Create a synthetic results_df with one ngram model, where each item is correct if more than half of
    # the ngram seeds got it correct.
ngram_results_df = (results_df[results_df.model_name == ngram_model].copy()
.groupby(["model_name", "corpus", "suite", "item", "tag", "circuit"])
.agg({"correct": "mean"}) > 0.5).reset_index()
ngram_results_df["seed"] = 0
# Drop existing model results.
results_df = pd.concat([results_df[~(results_df.model_name == ngram_model)],
ngram_results_df], sort=True)
# In[ ]:
# Prettify name columns, which we'll carry through data manipulations
for column, map_fn in PRETTY_COLUMN_MAPS:
pretty_column = "pretty_%s" % column
results_df[pretty_column] = results_df[column].map(map_fn)
if results_df[pretty_column].isna().any():
print("WARNING: In prettifying %s, yielded NaN values:" % column)
print(results_df[results_df[pretty_column].isna()])
# ### Data prep
# In[ ]:
suites_df = results_df.groupby(["model_name", "corpus", "seed", "suite"] + PRETTY_COLUMNS).correct.mean().reset_index()
suites_df["tag"] = suites_df.suite.transform(lambda s: re.split(r"[-_0-9]", s)[0])
suites_df["circuit"] = suites_df.tag.map(tag_to_circuit)
# For controlled evaluation:
# Compute a model's test suite accuracy relative to the mean accuracy on this test suite.
# Only compute this on controlled models.
def get_controlled_mean(suite_results):
# When computing test suite mean, first collapse test suite accuracies within model--corpus, then combine resulting means.
return suite_results[suite_results.model_name.isin(controlled_models)].groupby(["model_name", "corpus"]).correct.mean().mean()
suite_means = suites_df.groupby("suite").apply(get_controlled_mean)
suites_df["correct_delta"] = suites_df.apply(lambda r: r.correct - suite_means.loc[r.suite] if r.model_name in controlled_models else None, axis=1)
# In[ ]:
# We'll save this data to a CSV file for access from R, where we do
# linear mixed-effects regression modeling.
suites_df.to_csv("../data/suites_df.csv")
# In[ ]:
# Join PPL and accuracy data.
joined_data = suites_df.groupby(["model_name", "corpus", "seed"] + PRETTY_COLUMNS)[["correct", "correct_delta"]].agg("mean")
joined_data = pd.DataFrame(joined_data).join(perplexity_df).reset_index()
joined_data.head()
# Track BPE + size separately.
joined_data["corpus_size"] = joined_data.corpus.str.split("-").apply(lambda tokens: tokens[1] if len(tokens) >= 2 else None)
joined_data["corpus_bpe"] = joined_data.corpus.str.split("-").apply(lambda tokens: tokens[2] if len(tokens) > 2 else ("none" if len(tokens) >= 2 else None))
# In[ ]:
# Join PPL and accuracy data, splitting on circuit.
joined_data_circuits = suites_df.groupby(["model_name", "corpus", "seed", "circuit"] + PRETTY_COLUMNS)[["correct", "correct_delta"]].agg("mean")
joined_data_circuits = | pd.DataFrame(joined_data_circuits) | pandas.DataFrame |
# BSD 3-Clause License
#
# Copyright (c) 2019, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Run this module first thing, to test your installation of romcomma.
**Contents**:
**predict**: Prediction using a GaussianBundle.
**test_input**: A rudimentary test input, for installation testing.
"""
from romcomma import distribution, function, data, model
from romcomma.typing_ import NP, Tuple
from numpy import zeros, eye, pi, full, array, transpose, diag, sign, ones, atleast_2d, abs, floor_divide, count_nonzero, mean, sqrt, \
concatenate, savetxt, loadtxt
from numpy.linalg import norm, eigvalsh
from pathlib import Path
from pandas import concat, DataFrame, read_csv
from json import load
import shutil
from time import time
EFFECTIVELY_ZERO = 1.0E-64
BASE_PATH = Path('X:\\comma_group1\\Rom\\dat\\TestFunctions\\ROM\\0')
RESULTS_PATH = BASE_PATH / "results"
DOC_PATH = Path('X:\\comma_group1\\Rom\\doc\\Papers\\romgp-paper-1')
K = 2
def linear_transformation(model_dir: Path) -> NP.Matrix:
with open(model_dir / "__meta__.json", mode='r') as file:
meta = load(file)
function_with_parameters = meta['origin']['functions_with_parameters'][0].split("; matrix=")
if len(function_with_parameters) > 1:
function_with_parameters = eval(function_with_parameters[-1][:-1])
return array(function_with_parameters)
else:
return ones((meta['data']['M'], meta['data']['M']), dtype=float)
def _random_str(random: bool) -> str:
return "random" if random else "rom"
def store_path(test_function: str, N: int, noise_std: float, random: bool, M: int = 5) -> Path:
return BASE_PATH / (test_function + '.{0:d}.{1:.3f}.{2:d}.'.format(M, noise_std, N) + _random_str(random))
def choose_Mu(test_function: str) -> int:
if test_function == "sobol_g":
return 3
elif test_function == "ishigami":
return 3
elif test_function == "sin.2":
return 2
else:
return 1
# def _run_test(test_function: str, N: int, noise_std: float, random: bool, gps: Tuple[str, ...], M: int):
# store = store_path(test_function, N, noise_std, random, M)
# Mu = choose_Mu(test_function)
# kernel_parameters = model.gpy_.Kernel.ExponentialQuadratic.Parameters(lengthscale=full((1, Mu), 0.2, dtype=float))
# parameters = model.gpy_.GP.DEFAULT_PARAMETERS._replace(kernel=kernel_parameters, e_floor=1E-5, e=1E-10)
# for k in range(K):
# fold = data.Fold(store, k, Mu)
# for gp in gps:
# dst = fold.dir / "{0}.reduced".format(gp)
# if dst.exists():
# shutil.rmtree(dst)
# shutil.copytree(src=fold.dir / gp, dst=dst)
# gp = model.gpy_.GP(fold, dst.name, parameters)
# gp.optimize()
# gp.test()
# model.gpy_.Sobol(gp)
# gp = None
def _run_test(test_function: str, N: int, noise_std: float, random: bool, M: int):
store = data.Store(store_path(test_function, N, noise_std, random, M))
Mu = choose_Mu(test_function)
gp_optimizer_options = {'optimizer': 'bfgs', 'max_iters': 5000, 'gtol': 1E-16}
kernel_parameters = model.gpy_.Kernel.ExponentialQuadratic.Parameters(lengthscale=full((1, 1), 2.5**(M/5), dtype=float))
parameters = model.gpy_.GP.DEFAULT_PARAMETERS._replace(kernel=kernel_parameters, e_floor=1E-6, e=0.003)
name = 'rom.reduced'
model.run.GPs(module=model.run.Module.GPY_, name=name, store=store, M_Used=Mu, parameters=parameters, optimize=True, test=True, sobol=True,
optimizer_options=gp_optimizer_options)
model.run.GPs(module=model.run.Module.GPY_, name=name, store=store, M_Used=Mu, parameters=None, optimize=True, test=True, sobol=True,
optimizer_options=gp_optimizer_options, make_ard=True)
def run_tests(test_functions: Tuple[str, ...], Ns: Tuple[int, ...], noise_stds: Tuple[float, ...], randoms: Tuple[bool, ...],
gps: Tuple[str, ...], Ms: Tuple[int, ...] = (5, )):
for M in Ms:
for N in Ns:
for test_function in test_functions:
for noise_std in noise_stds:
for random in randoms:
_run_test(test_function, N, noise_std, random, M)
def _test_stats(k: int, gp_path: Path) -> data.Frame:
test = data.Frame(gp_path / "__test__.csv").df.copy()
Y = test['Y'].values
mean_ = test['Predictive Mean'].values
std = test['Predictive Std'].values
err = abs(Y - mean_)
outliers = floor_divide(err, 2 * std)
df = DataFrame({'fold': k, 'RMSE': sqrt(mean(err ** 2)) / 4, 'Prediction Std': mean(std),
'Outliers': count_nonzero(outliers) / len(std)}, index=[0])
return data.Frame(gp_path / "test_stats.csv", df)
def _collect_test_stats(test_function: str, N: int, noise_std: float, random: bool, gps: Tuple[str, ...], M: int):
store = store_path(test_function, N, noise_std, random, M)
for k in range(K):
fold = data.Fold(store, k)
for gp in gps:
gp_path = fold.dir / gp
frame = _test_stats(k, gp_path)
def collect_tests(test_functions: Tuple[str, ...], Ns: Tuple[int, ...], noise_stds: Tuple[float, ...], randoms: Tuple[bool, ...],
gps: Tuple[str, ...], Ms: Tuple[int, ...] = (5, )):
for M in Ms:
for N in Ns:
for test_function in test_functions:
for noise_std in noise_stds:
for random in randoms:
_collect_test_stats(test_function, N, noise_std, random, gps, M)
def _collect_std(test_function: str, N: int, noise_std: float, random: bool, M: int):
store = data.Store(store_path(test_function, N, noise_std, random, M))
destination = store.dir / "results"
shutil.rmtree(destination, ignore_errors=True)
destination.mkdir(mode=0o777, parents=True, exist_ok=False)
result = 0.0
for k in range(K):
fold = data.Fold(store, k)
result += fold.standard.df.iloc[-1, -1]/K
savetxt(fname=(destination / "std.csv"), X=atleast_2d(result), delimiter=",")
def _collect_result(test_function: str, N: int, noise_std: float, random: bool, gps: Tuple[str, ...], M: int):
store = store_path(test_function, N, noise_std, random, M)
destination = store / "results"
destination.mkdir(mode=0o777, parents=True, exist_ok=True)
for gp in gps:
for sobol in (True, False):
if sobol:
lin_trans = linear_transformation(store)
frame = data.Frame(destination / "{0}.{1}".format(gp, "True_Theta.csv"), DataFrame(lin_trans))
lin_trans = transpose(lin_trans)
params = ("Theta.csv", "S.csv", "S1.csv")
else:
params = ("lengthscale.csv", "e.csv", "f.csv", "log_likelihood.csv", "test_stats.csv")
for param in params:
results = None
avg = None
for k in range(K):
source = (store / "fold.{0:d}".format(k)) / gp
source = source / "sobol" if sobol else source / "kernel" if param == "lengthscale.csv" else source
result = data.Frame(source / param, **model.base.Model.CSV_PARAMETERS).df.copy(deep=True)
result.insert(0, "fold", full(result.shape[0], k), True)
if k == 0:
results = result
avg = result / K
else:
results = | concat([results, result], axis=0, ignore_index=True, sort=False) | pandas.concat |
'''Tests for preprocess.py. '''
import pytest
import pandas as pd
import numpy as np
from titanic import preprocess
#
##### FIXTURES ####################################################################################
#
COLUMNS = ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']
@pytest.fixture
def test_df_one_line():
data = [[0, 1, 1, "Surname, Title Name", "male", 30, 1, 0, "113803", 7.25, np.nan, "C"]]
return pd.DataFrame(data=data, columns=COLUMNS)
@pytest.fixture
def test_df_five_lines():
data = [[0, 0, 3, "<NAME>", "male", 30, 2, 2, "113803", 7.25, np.nan, "S"],
[1, 1, 3, "<NAME>", "female", 27, 2, 2, "123456", 9.25, np.nan, "S"],
[2, 1, 1, "Smith, <NAME>", "male", 45, 1, 0, "113803", 40, np.nan, "C"],
[3, 0, 1, "<NAME>", "female", 40, 1, 0, "113803", 40, np.nan, "C"],
[4, 1, 2, "<NAME>", "male", np.nan, 0, 0, "113803", 9, np.nan, "S"]]
return | pd.DataFrame(data=data, columns=COLUMNS) | pandas.DataFrame |
import sys
import traceback
import warnings
from . import jhu
import mechbayes.models.SEIRD
import pandas as pd
import matplotlib.pyplot as plt
import numpy as onp
import jax
import jax.numpy as np
from jax.random import PRNGKey
import numpyro
from numpyro.infer import MCMC, NUTS, Predictive
from pathlib import Path
import cachetools
import scipy
import scipy.stats
from .compartment import SEIRModel
from tqdm import tqdm
"""
************************************************************
Data
************************************************************
"""
def load_country_data():
countries = jhu.load_countries()
info = jhu.get_country_info()
names = set(info.index) & set(countries.columns.unique(level=0))
country_data = {
k: {'data' : countries[k].copy(),
'pop' : info.loc[k, 'Population'],
'name' : info.loc[k, 'name']}
for k in names
}
return country_data
def load_state_data():
states = jhu.load_us_states()
info = jhu.get_state_info()
names = set(info.index) & set(states.columns.unique(level=0))
data = {
k : {'data': states[k].copy(),
'pop': info.loc[k, 'Population'],
'name': info.loc[k, 'name']
}
for k in names
}
return data
def load_county_data():
US = jhu.load_us_counties()
info = jhu.get_county_info()
counties = set(info.index) & set(US.columns.unique(level=0))
data = {
k : {'data': US[k].copy(),
'pop': info.loc[k, 'Population'],
'name': info.loc[k, 'name']
}
for k in counties
}
return data
def load_data():
state_data = load_state_data()
country_data = load_country_data()
county_data = load_county_data()
return dict(country_data, **state_data, **county_data)
def redistribute(df, date, n, k, col='death'):
'''Redistribute n incident cases/deaths to previous k days'''
# Note: modifies df in place
# e.g., 100 incident deaths happen on day t
# --> n/k incident deaths on days t-k+1, t-k+2, ..., t
    # --> n/3 incident deaths on days t-2, t-1, t
#
# the cumulative number by day t does not change
ndays = onp.abs(k)
a = n // ndays
b = n % ndays
new_incident = a * onp.ones(ndays)
new_incident[:b] += 1
date = pd.to_datetime(date)
if k > 0:
new_incident = onp.concatenate([new_incident, [-n]])
new_cumulative = onp.cumsum(new_incident)
end = date
start = date - pd.Timedelta('1d') * ndays
else:
new_incident = onp.concatenate([[-n], new_incident])
new_cumulative = onp.cumsum(new_incident)
start = date
end = date + pd.Timedelta('1d') * ndays
days = pd.date_range(start=start, end=end)
#days = pd.date_range(end=date-pd.Timedelta('1d'), periods=k-1)
df.loc[days, col] += new_cumulative
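# Minimal usage sketch of redistribute() on a synthetic cumulative series
# (illustrative dates and counts): a reporting dump of 90 deaths attributed to
# 2020-07-01 is spread evenly over the three preceding days, while the
# cumulative count on 2020-07-01 itself, and on every later day, is unchanged.
def _example_redistribute():
    idx = pd.date_range("2020-06-25", "2020-07-05")
    df = pd.DataFrame({'death': onp.arange(len(idx)) * 10.0}, index=idx)
    before = df.loc["2020-07-01", 'death']
    redistribute(df, "2020-07-01", n=90, k=3, col='death')
    assert df.loc["2020-07-01", 'death'] == before
    return df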
def set_trailing_weekend_zeros_to_missing(data,
forecast_date,
no_sunday_data_places,
no_weekend_data_places):
# Set trailing zeros to missing for places that
# don't report on weekends.
#
# This could potentially be more automated, but
# there are issues to understand and constraints
# on potential solutions:
#
# - there may be true zeros. this is rare for
# cases, but fairly common for deaths.
#
# - we may want to set non-zero values to missing,
# e.g., for US where counts are very low on
# weekends due to most states not reporting
#
# - sometimes it is useful to manually adjust
# whether weekend data is present to help fix
# model fitting failures or very poor fits
#
# Note that we generally don't want to set _non-trailing_
# values to missing
#
# - The raw data is cumulative; setting one value
# to nan in the middle of the time series will
# lead to multiple nans after differencing to get
# incident data.
#
# - Missing cases/deaths from weekends are reported later,
# e.g., Monday is often a big spike. In the long run, we want
# low counts on weekends to offset larger counts during the
# week to get incidence correct at the weekly level.
forecast_date = pd.to_datetime(forecast_date)
# Changes data in place. no return value
if forecast_date.dayofweek == 6: # forecast on sunday
sunday = forecast_date
saturday = forecast_date - pd.Timedelta('1d')
for place in no_sunday_data_places + no_weekend_data_places:
data[place]['data'].loc[sunday, :] = onp.nan
for place in no_weekend_data_places:
data[place]['data'].loc[saturday, :] = onp.nan
elif forecast_date.dayofweek == 5: # forecast on saturday
saturday = forecast_date
for place in no_weekend_data_places:
data[place]['data'].loc[saturday, :] = onp.nan
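# Minimal sketch of the weekend-handling rule above, on synthetic place keys
# 'AA' and 'BB' (not real locations): with a Sunday forecast date, places with
# no weekend data get both Saturday and Sunday set to missing, while places
# that only miss Sunday keep their Saturday row.
def _example_set_trailing_weekend_zeros():
    idx = pd.date_range("2020-06-29", "2020-07-05")            # Monday .. Sunday
    frame = pd.DataFrame({'confirmed': 0.0, 'death': 0.0}, index=idx)
    data = {'AA': {'data': frame.copy()}, 'BB': {'data': frame.copy()}}
    set_trailing_weekend_zeros_to_missing(data, "2020-07-05",
                                          no_sunday_data_places=['AA'],
                                          no_weekend_data_places=['BB'])
    assert data['AA']['data'].loc["2020-07-04"].notna().all()  # Saturday kept
    assert data['BB']['data'].loc["2020-07-04"].isna().all()   # Saturday missing
    return data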
def smooth_to_weekly(data, forecast_date, place, var, start_date, end_date=None):
to_date = end_date or forecast_date
series = data[place]['data'][var]
reporting_dates = pd.date_range(start=start_date, end=to_date, freq=pd.Timedelta('1w'))
for reporting_date in reporting_dates:
# get total cases/deaths for week
right = reporting_date
left = reporting_date - pd.Timedelta("1w")
cum_tot = series[right]
weekly_tot = series[right] - series[left]
# construct incident time series with cases/deaths
# spread evenly through week
avg = int(weekly_tot // 7)
rem = int(weekly_tot % 7)
# set each day equal to floor(average)
incident = avg * onp.ones(7)
# add remainder by adding 1 for first rem days in scrambled order
scrambled_days = [3, 0, 6, 2, 5, 4, 1]
incident[scrambled_days[:rem]] += 1
# now reconstruct cumulative series from incident
series[left+pd.Timedelta("1d"):right] = series[left] + onp.cumsum(incident)
assert(series[right] - series[left] == weekly_tot)
assert(series[right] == cum_tot)
# if we didn't explicitly stop smoothing (e.g., because location
# went back to daily reporting), assume all observations after
# final weekly reporting date are missing
if end_date is None and len(reporting_dates) > 0:
last_reporting_date = reporting_dates[-1]
series[last_reporting_date + pd.Timedelta('1d'):] = onp.nan
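# Minimal sketch of the weekly-smoothing helper above, on a synthetic series
# (illustrative place key 'XX'): a location that only reports once a week shows
# cumulative jumps of 70 on each Sunday; after smoothing, each jump is spread as
# roughly 10 incident deaths per day over the preceding week, and the cumulative
# value on every reporting Sunday is preserved. The helper mutates
# data[place]['data'][var] in place, relying on pandas' default (non
# copy-on-write) assignment semantics.
def _example_smooth_to_weekly():
    idx = pd.date_range("2020-06-07", "2020-06-28")            # Sunday .. Sunday
    deaths = pd.Series(0.0, index=idx)
    deaths.loc["2020-06-14":] = 70.0
    deaths.loc["2020-06-21":] = 140.0
    deaths.loc["2020-06-28":] = 210.0
    data = {'XX': {'data': pd.DataFrame({'death': deaths}), 'pop': 1, 'name': 'XX'}}
    smooth_to_weekly(data, forecast_date=pd.to_datetime("2020-06-28"),
                     place='XX', var='death', start_date="2020-06-14")
    return data['XX']['data']['death']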
"""
************************************************************
Plotting
************************************************************
"""
def plot_R0(mcmc_samples, start, ax=None):
ax = plt.axes(ax)
# Compute R0 over time
gamma = mcmc_samples['gamma'][:,None]
beta = mcmc_samples['beta']
t = pd.date_range(start=start, periods=beta.shape[1], freq='D')
R0 = beta/gamma
pi = onp.percentile(R0, (10, 90), axis=0)
df = pd.DataFrame(index=t, data={'R0': onp.median(R0, axis=0)})
df.plot(style='-o', ax=ax)
ax.fill_between(t, pi[0,:], pi[1,:], alpha=0.1)
ax.axhline(1, linestyle='--')
def plot_growth_rate(mcmc_samples, start, model=SEIRModel, ax=None):
ax = plt.axes(ax)
# Compute growth rate over time
beta = mcmc_samples['beta']
sigma = mcmc_samples['sigma'][:,None]
gamma = mcmc_samples['gamma'][:,None]
t = pd.date_range(start=start, periods=beta.shape[1], freq='D')
growth_rate = SEIRModel.growth_rate((beta, sigma, gamma))
pi = onp.percentile(growth_rate, (10, 90), axis=0)
df = pd.DataFrame(index=t, data={'growth_rate': onp.median(growth_rate, axis=0)})
df.plot(style='-o', ax=ax)
ax.fill_between(t, pi[0,:], pi[1,:], alpha=0.1)
ax.axhline(0, linestyle='--')
"""
************************************************************
Running
************************************************************
"""
def run_place(data,
place,
model_type=mechbayes.models.SEIRD.SEIRD,
start = '2020-03-04',
end = None,
save = True,
init_values = None,
num_warmup = 1000,
num_samples = 1000,
num_chains = 1,
num_prior_samples = 0,
T_future=4*7,
prefix = "results",
resample_low=0,
resample_high=100,
save_fields=['beta0', 'beta', 'sigma', 'gamma', 'dy0', 'dy', 'dy_future', 'dz0', 'dz', 'dz_future', 'y0', 'y', 'y_future', 'z0', 'z', 'z_future' ],
**kwargs):
numpyro.enable_x64()
print(f"Running {place} (start={start}, end={end})")
place_data = data[place]['data'][start:end]
T = len(place_data)
model = model_type(
data = place_data,
T = T,
N = data[place]['pop'],
**kwargs
)
print(" * running MCMC")
mcmc_samples = model.infer(num_warmup=num_warmup,
num_samples=num_samples,
init_values=init_values)
if resample_low > 0 or resample_high < 100:
print(" * resampling")
mcmc_samples = model.resample(low=resample_low, high=resample_high, **kwargs)
# Prior samples
prior_samples = None
if num_prior_samples > 0:
print(" * collecting prior samples")
prior_samples = model.prior(num_samples=num_prior_samples)
# In-sample posterior predictive samples (don't condition on observations)
print(" * collecting in-sample predictive samples")
post_pred_samples = model.predictive()
# Forecasting posterior predictive (do condition on observations)
print(" * collecting forecast samples")
forecast_samples = model.forecast(T_future=T_future)
if save:
# Save samples
path = Path(prefix) / 'samples'
path.mkdir(mode=0o775, parents=True, exist_ok=True)
filename = path / f'{place}.npz'
save_samples(filename,
prior_samples,
mcmc_samples,
post_pred_samples,
forecast_samples,
save_fields=save_fields)
path = Path(prefix) / 'summary'
path.mkdir(mode=0o775, parents=True, exist_ok=True)
filename = path / f'{place}.txt'
write_summary(filename, model.mcmc)
def save_samples(filename,
prior_samples,
mcmc_samples,
post_pred_samples,
forecast_samples,
save_fields=None):
def trim(d):
if d is not None:
d = {k : v for k, v in d.items() if k in save_fields}
return d
file_exists = filename.exists()
onp.savez_compressed(filename,
prior_samples = trim(prior_samples),
mcmc_samples = trim(mcmc_samples),
post_pred_samples = trim(post_pred_samples),
forecast_samples = trim(forecast_samples))
if not file_exists:
filename.chmod(0o664)
def write_summary(filename, mcmc):
# Write diagnostics to file
file_exists = filename.exists()
orig_stdout = sys.stdout
with open(filename, 'w') as f:
sys.stdout = f
mcmc.print_summary()
sys.stdout = orig_stdout
if not file_exists:
filename.chmod(0o664)
def load_samples(filename):
x = np.load(filename, allow_pickle=True)
prior_samples = x['prior_samples'].item()
mcmc_samples = x['mcmc_samples'].item()
post_pred_samples = x['post_pred_samples'].item()
forecast_samples = x['forecast_samples'].item()
return prior_samples, mcmc_samples, post_pred_samples, forecast_samples
def gen_forecasts(data,
place,
model_type=mechbayes.models.SEIRD.SEIRD,
start = '2020-03-04',
end=None,
save = True,
show = True,
prefix='results',
**kwargs):
# Deal with paths
samples_path = Path(prefix) / 'samples'
vis_path = Path(prefix) / 'vis'
vis_path.mkdir(parents=True, exist_ok=True)
model = model_type()
confirmed = data[place]['data'].confirmed[start:end]
death = data[place]['data'].death[start:end]
T = len(confirmed)
N = data[place]['pop']
filename = samples_path / f'{place}.npz'
_, mcmc_samples, post_pred_samples, forecast_samples = load_samples(filename)
for daily in [False, True]:
for scale in ['log', 'lin']:
for T in [28]:
fig, axes = plt.subplots(nrows = 2, figsize=(8,12), sharex=True)
if daily:
variables = ['dy', 'dz']
observations = [confirmed.diff(), death.diff()]
else:
variables = ['y', 'z']
observations= [confirmed, death]
for variable, obs, ax in zip(variables, observations, axes):
model.plot_forecast(variable,
post_pred_samples,
forecast_samples,
start,
T_future=T,
obs=obs,
ax=ax,
scale=scale)
name = data[place]['name']
plt.suptitle(f'{name} {T} days ')
plt.tight_layout()
if save:
filename = vis_path / f'{place}_scale_{scale}_daily_{daily}_T_{T}.png'
plt.savefig(filename)
if show:
plt.show()
fig, ax = plt.subplots(figsize=(5,4))
plot_growth_rate(mcmc_samples, start, ax=ax)
plt.title(place)
plt.tight_layout()
if save:
filename = vis_path / f'{place}_R0.png'
file_exists = filename.exists()
plt.savefig(filename)
if not file_exists:
filename.chmod(0o664)
if show:
plt.show()
"""
************************************************************
Performance metrics
************************************************************
"""
def construct_daily_df(forecast_date, forecast_samples, target, truth_data=None, pad_strategy="shift"):
# Construct df indexed by time with samples in columns
# - starts one day after forecast date (usually Monday)
t = pd.date_range(start=forecast_date + pd.Timedelta("1d"),
periods=forecast_samples.shape[1],
freq='D')
daily_df = pd.DataFrame(index=t, data=np.transpose(forecast_samples))
# For incident forecasts made on Sunday, pad to include a value for Sunday
# so the first week is complete. This does not apply to forecasts made on
# other days because:
#
# -- we will never submit a forecast on Monday for the current week,
# because the data is not available until ~midnight on Monday
#
# -- forecasts submitted on Tuesday--Thursday are for the following week
#
if target.startswith("inc") and forecast_date.dayofweek == 6:
if pad_strategy == "shift":
daily_df.index -= pd.Timedelta("1d")
elif pad_strategy == "truth":
if truth_data is None:
raise ValueError("Must supply truth_data with pad_strategy='truth'")
sunday = forecast_date
saturday = sunday - pd.Timedelta("1d")
truth_val = np.maximum(truth_data.loc[sunday] - truth_data.loc[saturday], 0.)
new_row = pd.DataFrame([], index=[sunday])
daily_df = pd.concat([new_row, daily_df], ignore_index=False)
daily_df.loc[sunday, :] = truth_val
else:
raise ValueError(f"Unsuported pad_strategy {pad_strategy}")
# Always starts on forecast date
return daily_df
def resample_to_weekly(daily_df, target, full_weeks=True, label="left"):
if target.startswith("inc"):
if full_weeks:
# truncate to start on Sunday and end on Saturday before aggregating
start = daily_df.index[0]
end = daily_df.index[-1]
first_sunday = start if start.dayofweek==6 else start + pd.offsets.Week(weekday=6)
final_saturday = end if end.dayofweek==5 else end - pd.offsets.Week(weekday=5)
daily_df = daily_df.loc[first_sunday:final_saturday]
weekly_df = daily_df.resample("1w", closed='left', label=label).sum()
elif target.startswith("cum"):
if full_weeks:
# truncate end on Saturday before aggregating
end = daily_df.index[-1]
final_saturday = end if end.dayofweek==5 else end - pd.offsets.Week(weekday=5)
daily_df = daily_df.loc[:final_saturday]
weekly_df = daily_df.resample("1w", closed='left', label=label).last()
else:
raise ValueError(f"uncrecognized target {target}")
weekly_df[weekly_df < 0.] = 0.
return weekly_df
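# Minimal sketch of how the two helpers above combine, using synthetic forecast
# samples (the "inc death" target and the Sunday forecast date are illustrative):
# 100 sampled trajectories of one death per day are laid out on a daily index,
# shifted so the first epi-week starts on Sunday, and then summed into
# Sunday-to-Saturday weekly bins; the weekly totals preserve the daily totals.
def _example_weekly_aggregation():
    forecast_date = pd.to_datetime("2020-07-05")               # a Sunday
    samples = onp.ones((100, 28))                              # 100 draws x 28 daily values
    daily_df = construct_daily_df(forecast_date, samples, target="inc death",
                                  pad_strategy="shift")
    weekly_df = resample_to_weekly(daily_df, target="inc death")
    assert weekly_df.values.sum() == samples.sum()
    return weekly_df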
def score_place(forecast_date,
data,
place,
model_type=mechbayes.models.SEIRD.SEIRD,
prefix="results",
target="cum death",
freq="week",
periods=None,
pad_strategy="shift"):
'''Gives performance metrics for each time horizon for one place'''
if target == 'cum death':
forecast_field = 'z'
obs_field = 'death'
elif target == 'inc death':
forecast_field = 'dz'
obs_field = 'death'
elif target == 'cum case':
forecast_field = 'y'
obs_field = 'confirmed'
elif target == 'inc case':
forecast_field = 'dy'
obs_field = 'confirmed'
else:
raise ValueError(f"Invalid or unsupported target {target}")
filename = Path(prefix) / 'samples' / f'{place}.npz'
prior_samples, mcmc_samples, post_pred_samples, forecast_samples = \
load_samples(filename)
model = model_type()
forecast_date = pd.to_datetime(forecast_date)
# Get observed values for forecast period
if target.startswith('cum'):
start = forecast_date + pd.Timedelta("1d")
obs = data[place]['data'][obs_field][start:]
elif target.startswith('inc') and forecast_date.dayofweek==6:
# For incident forecasts made on Sunday, also get the Sunday
# truth data, because we will pad forecasts to include Sunday
start = forecast_date
obs = data[place]['data'][obs_field].diff()[start:] # incident
    elif target.startswith('inc'):
start = forecast_date + | pd.Timedelta("1d") | pandas.Timedelta |
#SPDX-License-Identifier: MIT
""" Helper methods constant across all workers """
import requests
import datetime
import time
import traceback
import json
import os
import sys
import math
import logging
import numpy
import copy
import concurrent
import multiprocessing
import psycopg2
import psycopg2.extensions
import csv
import io
from logging import FileHandler, Formatter, StreamHandler
from multiprocessing import Process, Queue, Pool, Value
from os import getpid
import sqlalchemy as s
import pandas as pd
from pathlib import Path
from urllib.parse import urlparse, quote
from sqlalchemy.ext.automap import automap_base
from augur.config import AugurConfig
from augur.logging import AugurLogging
from sqlalchemy.sql.expression import bindparam
from concurrent import futures
import dask.dataframe as dd
class Persistant():
ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def __init__(self, worker_type, data_tables=[],operations_tables=[]):
self.db_schema = None
self.helper_schema = None
self.worker_type = worker_type
#For database functionality
self.data_tables = data_tables
self.operations_tables = operations_tables
self._root_augur_dir = Persistant.ROOT_AUGUR_DIR
# count of tuples inserted in the database ( to store stats for each task in op tables)
self.update_counter = 0
self.insert_counter = 0
self._results_counter = 0
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
#TODO: consider taking parts of this out for the base class and then overriding it in WorkerGitInterfaceable
self.config = {
'worker_type': self.worker_type,
'host': self.augur_config.get_value('Server', 'host')
}
self.config.update(self.augur_config.get_section("Logging"))
try:
worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']]
self.config.update(worker_defaults)
except KeyError as e:
            logging.warning('Could not get default configuration for {}'.format(self.config['worker_type']))
worker_info = self.augur_config.get_value('Workers', self.config['worker_type'])
self.config.update(worker_info)
worker_port = self.config['port']
while True:
try:
r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format(
self.config['host'], worker_port)).json()
if 'status' in r:
if r['status'] == 'alive':
worker_port += 1
except:
break
#add credentials to db config. Goes to databaseable
self.config.update({
'port': worker_port,
'id': "workers.{}.{}".format(self.worker_type, worker_port),
'capture_output': False,
'location': 'http://{}:{}'.format(self.config['host'], worker_port),
'port_broker': self.augur_config.get_value('Server', 'port'),
'host_broker': self.augur_config.get_value('Server', 'host'),
'host_database': self.augur_config.get_value('Database', 'host'),
'port_database': self.augur_config.get_value('Database', 'port'),
'user_database': self.augur_config.get_value('Database', 'user'),
'name_database': self.augur_config.get_value('Database', 'name'),
'password_database': self.augur_config.get_value('Database', 'password')
})
# Initialize logging in the main process
self.initialize_logging()
# Clear log contents from previous runs
open(self.config["server_logfile"], "w").close()
open(self.config["collection_logfile"], "w").close()
# Get configured collection logger
self.logger = logging.getLogger(self.config["id"])
self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))
#Return string representation of an object with all information needed to recreate the object (Think of it like a pickle made out of text)
#Called using repr(*object*). eval(repr(*object*)) == *object*
def __repr__(self):
return f"{self.config['id']}"
def initialize_logging(self):
#Get the log level in upper case from the augur config's logging section.
self.config['log_level'] = self.config['log_level'].upper()
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
if self.config['verbose']:
format_string = AugurLogging.verbose_format_string
else:
format_string = AugurLogging.simple_format_string
#Use stock python formatter for stdout
formatter = Formatter(fmt=format_string)
        # Use a custom formatter for stderr; it gives more info than verbose_format_string
error_formatter = Formatter(fmt=AugurLogging.error_format_string)
worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/"
Path(worker_dir).mkdir(exist_ok=True)
logfile_dir = worker_dir + f"/{self.worker_type}/"
Path(logfile_dir).mkdir(exist_ok=True)
#Create more complex sublogs in the logfile directory determined by the AugurLogging class
server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"])
collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"])
collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"])
self.config.update({
'logfile_dir': logfile_dir,
'server_logfile': server_logfile,
'collection_logfile': collection_logfile,
'collection_errorfile': collection_errorfile
})
collection_file_handler = FileHandler(filename=self.config['collection_logfile'], mode="a")
collection_file_handler.setFormatter(formatter)
collection_file_handler.setLevel(self.config['log_level'])
collection_errorfile_handler = FileHandler(filename=self.config['collection_errorfile'], mode="a")
collection_errorfile_handler.setFormatter(error_formatter)
collection_errorfile_handler.setLevel(logging.WARNING)
logger = logging.getLogger(self.config['id'])
logger.handlers = []
logger.addHandler(collection_file_handler)
logger.addHandler(collection_errorfile_handler)
logger.setLevel(self.config['log_level'])
logger.propagate = False
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
console_handler = StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(self.config['log_level'])
logger.addHandler(console_handler)
if self.config['quiet']:
logger.disabled = True
self.logger = logger
#Database interface; the git-interfaceable subclass adds additional functionality on top of this base method.
def initialize_database_connections(self):
DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database']
)
# Create an sqlalchemy engine for both database schemas
self.logger.info("Making database connections")
self.db_schema = 'augur_data'
self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(self.db_schema)})
# , 'client_encoding': 'utf8'
self.helper_schema = 'augur_operations'
self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(self.helper_schema)})
metadata = s.MetaData()
helper_metadata = s.MetaData()
# Reflect only the tables we will use for each schema's metadata object
metadata.reflect(self.db, only=self.data_tables)
helper_metadata.reflect(self.helper_db, only=self.operations_tables)
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
# So we can access all our tables when inserting, updating, etc
for table in self.data_tables:
setattr(self, '{}_table'.format(table), Base.classes[table].__table__)
try:
self.logger.info(HelperBase.classes.keys())
except:
pass
for table in self.operations_tables:
try:
setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__)
except Exception as e:
self.logger.error("Error setting attribute for table: {} : {}".format(table, e))
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.logger.info("Trying to find max id of table...")
try:
self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1
except Exception as e:
self.logger.info(f"Could not find max id. ERROR: {e}")
#25151
#self.logger.info(f"Good, passed the max id getter. Max id: {self.history_id}")
#Align column dtypes between the subject and source dataframes so later merges compare like with like (datetimes become timezone-aware timestamps).
def sync_df_types(self, subject, source, subject_columns, source_columns):
type_dict = {}
## Getting rid of nan's and NoneTypes across the dataframe to start:
subject = subject.fillna(value=numpy.nan)
source = source.fillna(value=numpy.nan)
for index in range(len(source_columns)):
if type(source[source_columns[index]].values[0]) == numpy.datetime64:
subject[subject_columns[index]] = pd.to_datetime(
subject[subject_columns[index]], utc=True
)
source[source_columns[index]] = pd.to_datetime(
source[source_columns[index]], utc=True
)
continue
## Dealing with an error coming from paginate endpoint and the GitHub issue worker
### For a release in mid september, 2021. #SPG This did not work on Ints or Floats
# if type(source[source_columns[index]].values[0]).isnull():
# subject[subject_columns[index]] = pd.fillna(value=np.nan)
# source[source_columns[index]] = pd.fillna(value=np.nan)
# continue
source_index = source_columns[index]
try:
source_index = source_columns[index]
type_dict[subject_columns[index]] = type(source[source_index].values[0])
#self.logger.info(f"Source data column is {source[source_index].values[0]}")
#self.logger.info(f"Type dict at {subject_columns[index]} is : {type(source[source_index].values[0])}")
except Exception as e:
self.logger.info(f"Source data registered exception: {source[source_index]}")
self.print_traceback("", e, True)
subject = subject.astype(type_dict)
return subject, source
#Map a Python/numpy value to an appropriate SQLAlchemy column type.
def get_sqlalchemy_type(self, data, column_name=None):
if type(data) == str:
try:
time.strptime(data, "%Y-%m-%dT%H:%M:%SZ")
return s.types.TIMESTAMP
except ValueError:
return s.types.String
elif (
isinstance(data, (int, numpy.integer))
or (isinstance(data, float) and column_name and 'id' in column_name)
):
return s.types.BigInteger
elif isinstance(data, float):
return s.types.Float
elif type(data) in [numpy.datetime64, pd._libs.tslibs.timestamps.Timestamp]:
return s.types.TIMESTAMP
elif column_name and 'id' in column_name:
return s.types.BigInteger
return s.types.String
def _convert_float_nan_to_int(self, df):
for column in df.columns:
if (
df[column].dtype == float
and ((df[column] % 1 == 0) | (df[column].isnull())).all()
):
df[column] = df[column].astype("Int64").astype(object).where(
pd.notnull(df[column]), None
)
return df
def _setup_postgres_merge(self, data_sets, sort=False):
metadata = s.MetaData()
data_tables = []
# Setup/create tables
for index, data in enumerate(data_sets):
data_table = s.schema.Table(f"merge_data_{index}_{os.getpid()}", metadata)
df = pd.DataFrame(data)
columns = sorted(list(df.columns)) if sort else df.columns
df = self._convert_float_nan_to_int(df)
for column in columns:
data_table.append_column(
s.schema.Column(
column, self.get_sqlalchemy_type(
df.fillna(method='bfill').iloc[0][column], column_name=column
)
)
)
data_tables.append(data_table)
metadata.create_all(self.db, checkfirst=True)
# Insert data to tables
for data_table, data in zip(data_tables, data_sets):
self.bulk_insert(
data_table, insert=data, increment_counter=False, convert_float_int=True
)
session = s.orm.Session(self.db)
self.logger.info("Session created for merge tables")
return data_tables, metadata, session
def _close_postgres_merge(self, metadata, session):
session.close()
self.logger.info("Session closed")
# metadata.reflect(self.db, only=[new_data_table.name, table_values_table.name])
metadata.drop_all(self.db, checkfirst=True)
self.logger.info("Merge tables dropped")
def _get_data_set_columns(self, data, columns):
if not len(data):
return []
self.logger.info("Getting data set columns")
df = pd.DataFrame(data, columns=data[0].keys())
final_columns = copy.deepcopy(columns)
for column in columns:
if '.' not in column:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
if column not in expanded_column.columns:
expanded_column[column] = None
final_columns += list(expanded_column.columns)
try:
df = df.join(expanded_column)
except ValueError:
# columns already added (happens if trying to expand the same column twice)
# TODO: Catch this before by only looping unique prefixs?
self.logger.info("Columns have already been added, moving on...")
pass
self.logger.info(final_columns)
self.logger.info(list(set(final_columns)))
self.logger.info("Finished getting data set columns")
return df[list(set(final_columns))].to_dict(orient='records')
def organize_needed_data(
self, new_data, table_values, action_map={}, in_memory=True
):
"""
This method determines which rows need to be inserted into the database (ensuring data isn't inserted more than once)
and which rows contain data that needs to be updated
:param new_data: list of dictionaries - needs to be compared with data in database to see if any updates are
needed or if the data needs to be inserted
:param table_values: list of SQLAlchemy tuples - data that is currently in the database
:param action_map: dict with two keys (insert and update) and each key's value contains a list of the fields
that are needed to determine if a row is unique or if a row needs to be updated
:param in_memory: boolean - determines whether the method is done is memory or database
(currently everything keeps the default of in_memory=True)
:return: list of dictionaries that contain data that needs to be inserted into the database
:return: list of dictionaries that contain data that needs to be updated in the database
"""
if len(table_values) == 0:
return new_data, []
if len(new_data) == 0:
return [], []
need_insertion = pd.DataFrame()
need_updates = pd.DataFrame()
if not in_memory:
new_data_columns = action_map['insert']['source']
table_value_columns = action_map['insert']['augur']
if 'update' in action_map:
new_data_columns += action_map['update']['source']
table_value_columns += action_map['update']['augur']
(new_data_table, table_values_table), metadata, session = self._setup_postgres_merge(
[
self._get_data_set_columns(new_data, new_data_columns),
self._get_data_set_columns(table_values, table_value_columns)
]
)
need_insertion = pd.DataFrame(session.query(new_data_table).join(table_values_table,
eval(
' and '.join([
f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" \
for table_column, source_column in zip(action_map['insert']['augur'],
action_map['insert']['source'])
])
), isouter=True).filter(
table_values_table.c[action_map['insert']['augur'][0]] == None
).all(), columns=table_value_columns)
self.logger.info("need_insertion calculated successfully")
need_updates = pd.DataFrame(columns=table_value_columns)
if 'update' in action_map:
need_updates = pd.DataFrame(session.query(new_data_table).join(table_values_table,
s.and_(
eval(' and '.join([f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['insert']['augur'], action_map['insert']['source'])])),
eval(' and '.join([f"table_values_table.c.{table_column} != new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['update']['augur'], action_map['update']['source'])]))
) ).all(), columns=table_value_columns)
self.logger.info("need_updates calculated successfully")
self._close_postgres_merge(metadata, session)
new_data_df = pd.DataFrame(new_data)
need_insertion, new_data_df = self.sync_df_types(
need_insertion, new_data_df, table_value_columns, new_data_columns
)
need_insertion = need_insertion.merge(
new_data_df, how='inner', left_on=table_value_columns, right_on=new_data_columns
)
self.logger.info(
f"Table needs {len(need_insertion)} insertions and "
f"{len(need_updates)} updates.\n")
else:
#create pandas tabular data from the keys of the passed table values
table_values_df = pd.DataFrame(table_values, columns=table_values[0].keys())
new_data_df = pd.DataFrame(new_data).dropna(subset=action_map['insert']['source'])
new_data_df, table_values_df = self.sync_df_types(new_data_df, table_values_df,
action_map['insert']['source'], action_map['insert']['augur'])
#Throwing value errors. 'cannot use name of an existing column for indicator column'
'''
This merge determines uniqueness, i.e. whether a piece of data needs to be
inserted or whether it already exists in the database.
With regard to the comment_action_map (for insertion of issue_comments and
pull_request_comments) we need to recognize the following:
paginate_endpoint() then gets a dataframe of all the data that needs to be inserted.
Earlier, we added 'tool_source' to the augur side of the action map, and left
'id' alone on the source side (since tool_source is our variable, and part of our
natural key).
--<NAME> and <NAME> 9/16/2021. Debugging duplicate insert errors for
comments after initial collection.
'''
try:
need_insertion = new_data_df.merge(table_values_df, suffixes=('','_table'),
how='outer', indicator=True, left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur']).loc[lambda x : x['_merge']=='left_only']
except ValueError as e:
#The ValueError means a column named '_merge' already exists; retry with a
#custom indicator name so the left-only filter below still works.
self.logger.warning(f"Error thrown during pandas merge: {e}")
need_insertion = new_data_df.merge(table_values_df, suffixes=('','_table'),
how='outer', indicator='_augur_merge', left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur']).loc[lambda x : x['_augur_merge']=='left_only']
if 'update' in action_map:
new_data_df, table_values_df = self.sync_df_types(new_data_df, table_values_df,
action_map['update']['source'], action_map['update']['augur'])
partitions = math.ceil(len(new_data_df) / 1000)
attempts = 0
while attempts < 50:
try:
need_updates = pd.DataFrame()
self.logger.info(f"Trying {partitions} partitions\n")
for sub_df in numpy.array_split(new_data_df, partitions):
self.logger.info(f"Trying a partition, len {len(sub_df)}\n")
need_updates = pd.concat([ need_updates, sub_df.merge(table_values_df, left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur'], suffixes=('','_table'), how='inner',
indicator=False).merge(table_values_df, left_on=action_map['update']['source'],
right_on=action_map['update']['augur'], suffixes=('','_table'), how='outer',
indicator=True).loc[lambda x : x['_merge']=='left_only'] ])
self.logger.info(f"need_updates merge: {len(sub_df)} worked\n")
break
except MemoryError as e:
self.logger.info(f"new_data ({sub_df.shape}) is too large to allocate memory for " +
f"need_updates df merge.\nMemoryError: {e}\nTrying again with {partitions + 1} partitions...\n")
partitions += 1
attempts += 1
# self.logger.info(f"End attempt # {attempts}\n")
if attempts >= 50:
self.logger.info("Max need_updates merge attempts exceeded, cannot perform " +
"updates on this repo.\n")
else:
need_updates = need_updates.drop([column for column in list(need_updates.columns) if \
column not in action_map['update']['augur'] and column not in action_map['insert']['augur']],
axis='columns')
for column in action_map['insert']['augur']:
need_updates[f'b_{column}'] = need_updates[column]
need_updates = need_updates.drop([column for column in action_map['insert']['augur']], axis='columns')
return need_insertion.to_dict('records'), need_updates.to_dict('records')
def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}):
""" DEPRECATED
SPG 9/15/2021 TODO -- Why is this deprecated?
Include an extra key-value pair on each element of new_data that represents
the action that should be taken with this element (i.e. 'need_insertion')
:param new_data: List of dictionaries, data to be assigned an action to
:param table_values: Pandas DataFrame, existing data in the database to check
what action should be taken on the new_data depending on the presence of
each element in this DataFrame
:param update_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
updates (if source data value != value in existing database row, then an
update is needed). Key is source data column name, value is database field name.
Example: {'id': 'gh_issue_id'}
:param duplicate_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
duplicates (if source data value == value in existing database row, then this
element is a duplicate and would not need an insertion). Key is source data
column name, value is database field name. Example: {'id': 'gh_issue_id'}
:param table_pkey: String, the field name of the primary key of the table in
the database that we are checking the table_values for.
:param value_update_col_map: Dictionary, sometimes we add a new field to a table,
and we want to trigger an update of that row in the database even if all of the
data values are the same and would not need an update ordinarily. Checking for
a specific existing value in the database field allows us to do this. The key is the
name of the field in the database we are checking for a specific value to trigger
an update, the value is the value we are checking for equality to trigger an update.
Example: {'cntrb_id': None}
:return: List of dictionaries, contains all the same elements of new_data, except
each element now has an extra key-value pair with the key being 'flag', and
the value being 'need_insertion', 'need_update', or 'none'
"""
need_insertion_count = 0
need_update_count = 0
if type(table_values) == list:
if len(table_values) > 0:
table_values = pd.DataFrame(table_values, columns=table_values[0].keys())
else:
table_values = pd.DataFrame(table_values)
for i, obj in enumerate(new_data):
if type(obj) != dict:
new_data[i] = {'flag': 'none'}
continue
obj['flag'] = 'none' # default of no action needed
existing_tuple = None
for db_dupe_key in list(duplicate_col_map.keys()):
if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any():
if table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'):
existing_tuple = table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0]
continue
obj['flag'] = 'need_insertion'
need_insertion_count += 1
break
if obj['flag'] == 'need_insertion':
continue
if not existing_tuple:
self.logger.info('An existing tuple was not found for this data ' +
'point and we have reached the check-updates portion of assigning ' +
'tuple action, so we will now move to next data point\n')
continue
# If we need to check the values of the existing tuple to determine if an update is needed
''' This "value_check" is really really what I think we want to be doing for the update to issue status
TODO SPG 9/15/2021. '''
for augur_col, value_check in value_update_col_map.items():
not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True
if existing_tuple[augur_col] != value_check and not_nan_check:
continue
self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col))
obj['flag'] = 'need_update'
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
if obj['flag'] == 'need_update':
self.logger.info('Already determined that current tuple needs update, skipping checking further updates. '
'Moving to next tuple.\n')
continue
# Now check the existing tuple's values against the response values to determine if an update is needed
for col in update_col_map.keys():
if update_col_map[col] not in obj:
continue
if obj[update_col_map[col]] == existing_tuple[col]:
continue
self.logger.info("Found a tuple that needs an update for column: {}\n".format(col))
obj['flag'] = 'need_update'
self.logger.info(existing_tuple)
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) +
"was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count))
return new_data
def check_duplicates(self, new_data, table_values, key):
""" Filters what items of the new_data json (list of dictionaries) that are not
present in the table_values df
:param new_data: List of dictionaries, new data to filter duplicates out of
:param table_values: Pandas DataFrame, existing data to check what data is already
present in the database
:param key: String, key of each dict in new_data whose value we are checking
duplicates with
:return: List of dictionaries, contains elements of new_data that are not already
present in the database
"""
need_insertion = []
for obj in new_data:
if type(obj) != dict:
continue
if not table_values.isin([obj[key]]).any().any():
need_insertion.append(obj)
self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) +
"was reduced to {} tuples.\n".format(str(len(need_insertion))))
return need_insertion
def get_max_id(self, table, column, default=25150, operations_table=False):
""" Gets the max value (usually used for id/pk's) of any Integer column
of any table
:param table: String, the table that consists of the column you want to
query a max value for
:param column: String, the column that you want to query the max value for
:param default: Integer, if there are no values in the
specified column, the value of this parameter will be returned
:param operations_table: Boolean, if True, this signifies that the table/column
that is wanted to be queried is in the augur_operations schema rather than
the augur_data schema. Default False
:return: Integer, the max value of the specified column/table
"""
maxIdSQL = s.sql.text("""
SELECT max({0}.{1}) AS {1}
FROM {0}
""".format(table, column))
db = self.db if not operations_table else self.helper_db
rs = pd.read_sql(maxIdSQL, db, params={})
if rs.iloc[0][column] is not None:
max_id = int(rs.iloc[0][column]) + 1
self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id))
else:
max_id = default
self.logger.warning("Could not find max id for {} column in the {} table... " +
"using default set to: {}\n".format(column, table, max_id))
return max_id
def get_table_values(self, cols, tables, where_clause=""):
""" Can query all values of any column(s) from any table(s)
with an optional where clause
:param cols: List of Strings, column(s) that user wants to query
:param tables: List of Strings, table(s) that user wants to query
:param where_clause: String, optional where clause to filter the values
queried
:return: Pandas DataFrame, contains all values queried in the columns, tables, and
optional where clause provided
"""
table_str = tables[0]
del tables[0]
col_str = cols[0]
del cols[0]
for table in tables:
table_str += ", " + table
for col in cols:
col_str += ", " + col
table_values_sql = s.sql.text("""
SELECT {} FROM {} {}
""".format(col_str, table_str, where_clause))
self.logger.info("Getting table values with the following PSQL query: \n{}\n".format(
table_values_sql))
values = pd.read_sql(table_values_sql, self.db, params={})
return values
def bulk_insert(
self, table, insert=[], update=[], unique_columns=[], update_columns=[],
max_attempts=3, attempt_delay=3, increment_counter=True, convert_float_int=False
):
""" Performs bulk inserts/updates of the given data to the given table
:param table: String, name of the table that we are inserting/updating rows
:param insert: List of dicts, data points to insert
:param update: List of dicts, data points to update, only needs key/value
pairs of the update_columns and the unique_columns
:param unique_columns: List of strings, column names that would uniquely identify any
given data point
:param update_columns: List of strings, names of columns that are being updated
:param max_attempts: Integer, number of attempts to perform on inserting/updating
before moving on
:param attempt_delay: Integer, number of seconds to wait in between attempts
:returns: SQLAlchemy database execution response object(s), contains metadata
about number of rows inserted etc. This data is not often used.
"""
self.logger.info(
f"{len(insert)} insertions are needed and {len(update)} "
f"updates are needed for {table}"
)
update_result = None
insert_result = None
if len(update) > 0:
attempts = 0
update_start_time = time.time()
while attempts < max_attempts:
try:
update_result = self.db.execute(
table.update().where(
eval(
' and '.join(
[
f"self.{table}_table.c.{key} == bindparam('b_{key}')"
for key in unique_columns
]
)
)
).values(
{key: key for key in update_columns}
),
update
)
if increment_counter:
self.update_counter += update_result.rowcount
self.logger.info(
f"Updated {update_result.rowcount} rows in "
f"{time.time() - update_start_time} seconds"
)
break
except Exception as e:
self.logger.info(f"Warning! Error bulk updating data: {e}")
time.sleep(attempt_delay)
attempts += 1
if len(insert) > 0:
insert_start_time = time.time()
def psql_insert_copy(table, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
table : pandas.io.sql.SQLTable
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : Iterable that iterates the values to be inserted
"""
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as curs:
s_buf = io.StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ', '.join('"{}"'.format(k) for k in keys)
if table.schema:
table_name = '{}.{}'.format(table.schema, table.name)
else:
table_name = table.name
sql = "COPY {} ({}) FROM STDIN WITH (FORMAT CSV, ENCODING 'UTF-8')".format(
table_name, columns)
#(FORMAT CSV, FORCE_NULL(column_name))
self.logger.debug(f'table name is: {table_name}, and columns are {columns}.')
self.logger.debug(f'sql is: {sql}')
#This causes the github worker to throw an error with pandas
#cur.copy_expert(sql=sql, file=self.text_clean(s_buf))
# s_buf_encoded = s_buf.read().encode("UTF-8")
#self.logger.info(f"this is the sbuf_encdoded {s_buf_encoded}")
try:
#Session=sessy.sessionmaker(bind=curs)
#session=Session()
#session.copy_expert(sql=sql, file=s_buf)
#copy_expert(sql=sql, file=s_buf)
curs.copy_expert(sql=sql, file=s_buf)
#session.commit()
#self.logger.info("message committed")
dbapi_conn.commit()
# self.logger.debug("good dog. record committed! Watson, come quick!!!")
except psycopg2.errors.UniqueViolation as e:
self.logger.info(f"{e}")
dbapi_conn.rollback()
except Exception as e:
self.print_traceback("Bulk insert error", e, True)
dbapi_conn.rollback()
try:
df = pd.DataFrame(insert)
if convert_float_int:
df = self._convert_float_nan_to_int(df)
df.to_sql(
schema = self.db_schema,
name=table.name,
con=self.db,
if_exists="append",
index=False,
#method=None,
method=psql_insert_copy,
#dtype=dict,
chunksize=1
)
if increment_counter:
self.insert_counter += len(insert)
self.logger.info(
f"Inserted {len(insert)} rows in {time.time() - insert_start_time} seconds "
"thanks to postgresql's COPY FROM CSV! :)"
)
except Exception as e:
self.logger.info(f"Bulk insert error 2: {e}. exception registered.")
return insert_result, update_result
def text_clean(self, data, field):
""" "Cleans" the provided field of each dict in the list of dicts provided
by removing NUL (C text termination) characters
Example: "\u0000"
:param data: List of dicts
:param field: String
:returns: Same data list with each element's field updated with NUL characters
removed
"""
#self.logger.info(f"Original data point{field:datapoint[field]}")
return [
{
**data_point,
#field: data_point[field].replace("\x00", "\uFFFD")
#self.logger.info(f"Null replaced data point{field:datapoint[field]}")
## trying to use standard python3 method for text cleaning here.
# This was after `data_point[field]` for a while as `, "utf-8"` and did not work
# Nay, it cause silent errors without insert; or was part of that hot mess.
# field: bytes(data_point[field]).decode("utf-8", "ignore")
field: bytes(data_point[field], "utf-8").decode("utf-8", "ignore").replace("\x00", "\uFFFD")
#0x00
} for data_point in data
]
# def text_clean(self, data, field):
# """ "Cleans" the provided field of each dict in the list of dicts provided
# by removing NUL (C text termination) characters
# Example: "\u0000"
# :param data: List of dicts
# :param field: String
# :returns: Same data list with each element's field updated with NUL characters
# removed
# """
# return [
# {
# **data_point,
# field: data_point[field].replace("\x00", "\uFFFD")
# } for data_point in data
# ]
def _add_nested_columns(self, df, column_names):
# todo: support deeper nests (>1) and only expand necessary columns
# todo: merge with _get_data_set_columns
for column in column_names:
self.logger.debug(f"column included: {column}.")
if '.' not in column:
continue
# if the column is already present then we
# dont' need to try to add it again
if column in df.columns:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
self.logger.debug('\n')
self.logger.debug('\n')
self.logger.debug('\n')
self.logger.debug('\n')
self.logger.debug(f'Expanded Columns Are:{expanded_column.columns}')
self.logger.debug('\n')
self.logger.debug('\n')
self.logger.debug('\n')
if column not in expanded_column.columns:
expanded_column[column] = None
try:
df = df.join(expanded_column)
except ValueError as e:
# columns already added (happens if trying to expand the same column twice)
# TODO: Catch this before by only looping unique prefixs?
self.print_traceback("value error in _add_nested_columns", e, True)
except Exception as e:
self.print_traceback("_add_nested_columns", e, True)
finally:
self.logger.debug(f"finished _add_nested_columns.")
return df
def enrich_data_primary_keys(
self, source_data, table, gh_merge_fields, augur_merge_fields, in_memory=True
):
''' The gh_merge_fields are almost always taken directly from the source in the action map.
The augur_merge_fields are the field names where augur persists the source values.
These are almost never (never) the primary keys on our table. They are the natural
keys at the source, I think, with some probability close to 1 (SPG 9/13/2021).'''
''' SPG 9/15/2021: This seems method may be the source of duplicate inserts that seem like
they should not actually get run because we are specifying the natural key in the insert map.
I really don't completely understand what we are doing here. '''
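        # Hedged usage sketch (field names are illustrative): attach our stored
        # primary keys to freshly collected records by matching the source's natural
        # key against the field where augur persists it:
        #
        #   enriched = self.enrich_data_primary_keys(
        #       comments, self.issues_table,
        #       gh_merge_fields=['issue_url'], augur_merge_fields=['issue_url'])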
self.logger.info("Preparing to enrich data.\n")
if len(source_data) == 0:
self.logger.info("There is no source data to enrich.\n")
return source_data
source_df = self._add_nested_columns(pd.DataFrame(source_data), gh_merge_fields)
import unittest
import numpy as np
import pandas as pd
import random
import kMeans as k
class kMeans_spec(unittest.TestCase):
def setUp(self):
return 0
def test_random_element(self):
df = pd.DataFrame([[1,0,2],
[2,0,2.5],
[3,6,1.5]],None,['a1','a2','a3'])
result = k.random_sample(df,2)
#print(result)
def test_create_clusters(self):
df = pd.DataFrame([[1,0,2],
[2,0,2.5],
[3,6,1.5]],None,['a1','a2','a3'])
result = k.create_clusters(df)
#print(result)
def test_distance_equal(self):
e1 = pd.Series([2,1,2])
e2 = pd.Series([2,1,2])
result = k.distance(e1,e2)
self.assertEqual(0,result)
def test_distance_different(self):
e1 = pd.Series([2,1])
e2 = pd.Series([1,3])
result = k.distance(e1,e2)
self.assertEqual(np.sqrt(5),result)
def test_closest(self):
element = pd.Series([2,1,2],['a1','a2','a3'])
centers = pd.DataFrame([[1,0,2],
[2,0,2.5],
[3,6,1.5]],None,['a1','a2','a3'])
result = k.closest(element,centers)
expected = pd.Series([2,0,2.5])
assert ((expected == result).all())
def test_find_center(self):
df = pd.DataFrame([[1,0,2],
[2,0,2.5],
[3,6,1.5]],None,['a1','a2','a3'])
result = df.mean(axis=0)
#print(result)
def test_find_centers(self):
clusters = [pd.DataFrame([[2,0,2.5],
[3,6,1.5]],None,['a1','a2','a3']),
pd.DataFrame([[1,0,2 ]],None,['a1','a2','a3'])]
result = k.find_centers(clusters)
#print(result)
def test_assign_clusters(self):
df = pd.DataFrame([[1,0,2],
[2,0,2.5],
[3,6,1.5]],None,['a1','a2','a3'])
centers = pd.DataFrame([[1,0.5,2 ],
[2,0.5,2.5]],None,['a1','a2','a3'])
clusters = [pd.DataFrame(), pd.DataFrame()]
import pandas as pd
import numpy as np
import multiprocessing as mp
from tqdm import tqdm
import h5py
import os
###########################################
def match_profile_coords():
# After applying profile mask, the masked df_profile should match the df_beads on both coordinates and seq.
amino_acids = pd.read_csv('amino_acids.csv')
vocab = {y.upper(): x for x, y in zip(amino_acids.AA, amino_acids.AA3C)}
# profile_dir = 'training_100_profile'
profile_dir = 'validation_profile'
bead_dir = 'proteinnet_beads'
pdb1 = pd.read_csv(f'{profile_dir}/flist.txt')['fname']
# pdb2 = pdb1.apply(lambda x: x.split('_')[0] + '_' + x.split('_')[2])
pdb2 = pdb1.apply(lambda x: x.split('_')[0][3:] + '_' + x.split('_')[2])
bad = []
good = []
for p1, p2 in tqdm(zip(pdb1, pdb2)):
p2_path = f'{bead_dir}/{p2}_bead.csv'
if not os.path.exists(p2_path):
continue
df1 = pd.read_csv(f'{profile_dir}/{p1}')
df2 = pd.read_csv(p2_path)
mask = df1['mask']
if df1[mask==1].shape[0] == df2.shape[0]:
df1m = df1[mask==1].copy()
ca1 = df1m[['x', 'y', 'z']].values
ca2 = df2[['xca', 'yca', 'zca']].values
seq1 = ''.join(df1m['group_name'].values)
seq2 = ''.join(df2['group_name'].apply(lambda x: vocab[x]).values)
if np.abs(ca1 - ca2).max()>0.002:
# print(p1)
bad.append(p1)
elif seq1 != seq2:
# print(p1, 'seq diff')
bad.append(p1)
else:
good.append(p1)
# df1m['xs'] = df2['xs'].values
# df1m['ys'] = df2['ys'].values
# df1m['zs'] = df2['zs'].values
# df1m.to_csv(f'{data_dir}/{p1}_match.csv', index=False, float_format='%.4f')
df = pd.DataFrame({'pdb': good})
# df.to_csv('bead_profile_match.csv', index=False)
df.to_csv('bead_profile_match_validation.csv', index=False)
###########################################
def _rotation_matrix(c1, c2):
z = np.cross(c1, c2)
x = c1
y = np.cross(z, x)
x = x / np.sqrt(np.sum(x ** 2))
y = y / np.sqrt(np.sum(y ** 2))
z = z / np.sqrt(np.sum(z ** 2))
R = np.vstack([x, y, z])
# Rinv = np.linalg.inv(R.T)
return R
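# Worked example (illustrative): c1, pointing at the previous residue, becomes
# the local x axis; the part of c2 orthogonal to it defines y; z completes the
# right-handed frame, so R @ v expresses any neighbouring coordinate v in this
# residue-centred frame.
#
#   R = _rotation_matrix(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
#   # -> the 3x3 identity: the local frame coincides with the global axes.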
def extract_one_topk(pdb_id, df_beads, df_profile, local_rot_dir, k=10, mode='CA'):
if df_beads.shape[0] < 20:
return
idx = (df_profile['mask'] == 1)
group_num = df_profile['group_num'].values[idx]
group_name = df_profile['group_name'].values[idx]
if mode == 'CA':
group_coords = df_beads[['xca', 'yca', 'zca']].values
elif mode == 'CB':
group_coords = df_beads[['xcb', 'ycb', 'zcb']].values
elif mode == 'CAS':
group_coords = (df_beads[['xca', 'yca', 'zca']].values + df_beads[['xs', 'ys', 'zs']].values) / 2
else:
raise ValueError('mode should be CA / CB / CAS.')
df_list = []
count_res = []
for i, gc in enumerate(group_num):
if (gc-1 not in group_num) | (gc+1 not in group_num) | (gc-2 not in group_num) | (gc+2 not in group_num):
continue
# coords of the previous 2 and next 2 groups in local peptide segment
cen_i = (group_num == gc)
pre_i = (group_num == gc-1)
next_i = (group_num == gc+1)
pre2_i = (group_num == gc-2)
next2_i = (group_num == gc+2)
coords = group_coords - group_coords[cen_i] # center
c1 = coords[pre_i]
c2 = coords[next_i]
if np.sum(c1**2) == 0:
break
if np.sum(c2**2) == 0:
break
rotate_mat = _rotation_matrix(c1, c2)
# get central segment
ind = (cen_i | pre_i | next_i | pre2_i | next2_i)
gnum_seg = group_num[ind]
gname_seg = group_name[ind]
coords_seg = coords[ind]
coords_seg = np.squeeze(np.matmul(rotate_mat[None, :, :], coords_seg[:, :, None]))
# get nearest k residues from other residues
gnum_others = group_num[~ind]
gname_others = group_name[~ind]
coords_others = coords[~ind]
dist_i = np.sqrt((coords_others**2).sum(axis=1))
dist_i_arg = np.argsort(dist_i)
topk_arg = dist_i_arg[:k]
# topk_arg = (dist_i < 8)
# count_6a = dist_i[dist_i < 6].shape[0]
count_8a = dist_i[dist_i < 8].shape[0]
count_10a = dist_i[dist_i < 10].shape[0]
count_12a = dist_i[dist_i < 12].shape[0]
# count_res.append(np.array([count_6a, count_8a, count_10a, count_12a]))
gnum_topk = gnum_others[topk_arg]
gname_topk = gname_others[topk_arg]
coords_topk = coords_others[topk_arg]
coords_topk = np.squeeze(np.matmul(rotate_mat[None, :, :], coords_topk[:, :, None]))
# concat central segment and top_k
gnum = np.append(gnum_seg, gnum_topk)
gname = np.append(gname_seg, gname_topk)
coords = np.vstack((coords_seg, coords_topk))
distance = np.sqrt(np.sum(coords**2, axis=1))
segment_info = np.ones(gnum.shape[0], dtype=int) * 5
segment_info[gnum == gc] = 0
segment_info[gnum == gc-1] = 1
segment_info[gnum == gc+1] = 2
segment_info[gnum == gc-2] = 3
segment_info[gnum == gc+2] = 4
df_g = pd.DataFrame({'center_num': gc,
'group_num': gnum,
'group_name': gname,
'x': coords[:, 0],
'y': coords[:, 1],
'z': coords[:, 2],
'distance': distance,
'segment': segment_info,
'count8a': count_8a,
'count10a': count_10a,
'count12a': count_12a})
# df_g = df_g.sort_values(by=['segment', 'distance'])
def re_order_df_g(df_g):
df_g = df_g.sort_values(by=['segment', 'group_num'])
group_num = df_g['group_num'].values
distance = df_g['distance'].values
# set segment id
seg = np.ones(15, dtype=int)  # np.int is deprecated/removed in recent numpy
seg[5] = 2
for i in range(6, 15):
if group_num[i] == group_num[i - 1] + 1:
seg[i] = seg[i - 1]
else:
seg[i] = seg[i - 1] + 1
# calculate mean distance of segment
seg_dist = np.zeros(15)
for i in range(5, 15):
seg_dist[i] = distance[seg == seg[i]].mean()
df_g['seg'] = seg
df_g['seg_dist'] = seg_dist
df_g = df_g.sort_values(by=['segment', 'seg_dist', 'group_num'])
return df_g
df_g = re_order_df_g(df_g)
df_list.append(df_g)
if len(df_list)>0:
df = pd.concat(df_list, ignore_index=True)
idx = df['group_num'].values
for i in range(20):
aa_i = f'aa{i}'
df[aa_i] = df_profile[aa_i].values[idx]
df.to_csv(f'{local_rot_dir}/{pdb_id}_{mode}.csv', index=False, float_format='%.4f')
# count_res = np.vstack(count_res)
# df_count = pd.DataFrame({'count_6a': count_res[:, 0],
# 'count_8a': count_res[:, 1],
# 'count_10a': count_res[:, 2],
# 'count_12a': count_res[:, 3]})
# df_count.to_csv(f'{local_rot_dir}/{pdb_id}_count_res.csv', index=False)
def extract_local_structure():
dataset = 'training_30'
# dataset = 'validation'
mode = 'CAS'
if dataset == 'training_30':
# match training_30_protein_id to bead_profile_match.csv
data_dir = 'local_rot_training_30_v3'
pdb_list = pd.read_csv('training_30_protein_id2.csv')['pdb_id'].values
beads_list = pd.read_csv('bead_profile_match.csv')['pdb'].values
elif dataset == 'validation':
data_dir = 'local_rot_validation_v3'
pdb_list = pd.read_csv('validation_protein_id2.csv')['pdb'].values
beads_list = pd.read_csv('bead_profile_match_validation.csv')['pdb'].values
else:
raise ValueError('dataset not found')
pdb_list = list(set(pdb_list) & set(beads_list))
def extract_batch(batch):
for i in tqdm(batch):
pdb_id = pdb_list[i]
if dataset == 'training_30':
pdb_id_bead = pdb_id.split('_')[0] + '_' + pdb_id.split('_')[2]
profile_dir = 'training_100_profile'
elif dataset == 'validation':
pdb_id_bead = pdb_id.split('_')[0][3:] + '_' + pdb_id.split('_')[2]
profile_dir = 'validation_profile'
df_beads = pd.read_csv(f'proteinnet_beads/{pdb_id_bead}_bead.csv')
df_profile = pd.read_csv(f'{profile_dir}/{pdb_id}_profile.csv')
extract_one_topk(pdb_list[i], df_beads, df_profile, data_dir, mode=mode)
count = len(pdb_list)
num_cores = 40
batch_size = count // num_cores + 1
idx_list = np.arange(count)
batch_list = []
for i in range(0, count, batch_size):
batch = idx_list[i:i+batch_size]
batch_list += [batch]
# setup the multi-processes
with mp.Pool(processes=num_cores) as pool:
pool.map(extract_batch, batch_list)
############################################
def save_local_h5():
input_file = 'training_30'
# input_file = 'validation'
# input_file = 'testing'
mode = 'CAS'
local_rot_dir = f'local_rot_{input_file}_v3/'
# flist = pd.read_csv(f'{local_rot_dir}/flist_{mode}.txt')['fname'].values
# load tht hhsuite-proteinnet-pdb matched pdb list
flist = pd.read_csv('hh_ca_pdb_list.txt')['pdb_profile']
flist = flist.apply(lambda x: x + f'_{mode}.csv')
df_list = []
for fname in tqdm(flist):
df = pd.read_csv(f'{local_rot_dir}/{fname}')
df['pdb'] = fname[:-7]
df_list.append(df)
df = pd.concat(df_list, ignore_index=True)
# save data to hdf5
amino_acids = pd.read_csv('amino_acids.csv')
vocab = {x.upper(): y - 1 for x, y in zip(amino_acids.AA, amino_acids.idx)}
# segment = df['segment'].values
k = 15
seq = df['group_name'].apply(lambda x: vocab[x])
seq = seq.values.reshape((-1, k))
group_num = df['group_num'].values.reshape((-1, k))
coords = df[['x', 'y', 'z']].values.reshape((-1, k, 3))
profile = df[[f'aa{i}' for i in range(20)]].values.reshape((-1, k, 20))
pdb = df['pdb'].values.reshape((-1, k))[:, 0]
seg = df['seg'].values
seg = seg.reshape(-1, k)
start_id = np.zeros_like(seg)
idx = (seg[:, 1:] - seg[:, :-1] == 0)
start_id[:, 1:][idx] = 1
res_counts = df[['count8a', 'count10a', 'count12a']].values.reshape(-1, k, 3)[:, 0, :]
distance = df['distance'].values
dist = distance.reshape(-1, k)
dist_max = dist.max(axis=-1)
print(seq.min(), coords.min(), profile.min(), group_num.max(), res_counts.max())
print(seq.shape, coords.shape, profile.shape, group_num.shape, res_counts.shape)
# clean using residues distances
# idx = (dist[:, 1] < 4) & (dist[:, 2] < 4) & (dist[:, 3] < 8) & (dist[:, 4] < 8) & (dist_max < 20)
idx = (dist_max < 20)
seq = seq[idx]
group_num = group_num[idx]
coords = coords[idx]
profile = profile[idx]
start_id = start_id[idx]
res_counts = res_counts[idx]
pdb = pdb[idx]
print(seq.min(), coords.min(), profile.min(), group_num.max(), res_counts.max())
print(seq.shape, coords.shape, profile.shape, group_num.shape, res_counts.shape)
# shuffle
num = seq.shape[0]
idx = np.arange(num)
np.random.shuffle(idx)
seq = seq[idx]
group_num = group_num[idx]
coords = coords[idx]
profile = profile[idx]
start_id = start_id[idx]
res_counts = res_counts[idx]
pdb = pdb[idx]
df_pdb = pd.DataFrame({'pdb': pdb})
df_pdb['pdb'] = df_pdb['pdb'].apply(lambda x: x.split('_')[0] + '_' + x.split('_')[2])
df_pdb.to_csv(f'{input_file}_{mode}_pdb.csv', index=False)
with h5py.File(f'{input_file}_{mode}_v2.h5', 'w') as f:
dset = f.create_dataset("seq", shape=seq.shape, data=seq, dtype='i1')
dset = f.create_dataset("group_num", shape=group_num.shape, data=group_num, dtype='i')
dset = f.create_dataset("start_id", shape=start_id.shape, data=start_id, dtype='i1')
dset = f.create_dataset("res_counts", shape=res_counts.shape, data=res_counts, dtype='i2')
dset = f.create_dataset("coords", shape=coords.shape, data=coords, dtype='f4')
dset = f.create_dataset("profile", shape=profile.shape, data=profile, dtype='f4')
# if input_file == 'training_30':
# N = 100000
# df_pdb[N:].to_csv(f'{input_file}_{mode}_pdb_train.csv', index=False)
#
# with h5py.File(f'{input_file}_{mode}_v2_train.h5', 'w') as f:
# dset = f.create_dataset("seq", shape=seq[N:].shape, data=seq[N:], dtype='i1')
# dset = f.create_dataset("group_num", shape=group_num[N:].shape, data=group_num[N:], dtype='i')
# dset = f.create_dataset("start_id", shape=start_id[N:].shape, data=start_id[N:], dtype='i1')
# dset = f.create_dataset("res_counts", shape=res_counts[N:].shape, data=res_counts[N:], dtype='i2')
# dset = f.create_dataset("coords", shape=coords[N:].shape, data=coords[N:], dtype='f4')
# dset = f.create_dataset("profile", shape=profile[N:].shape, data=profile[N:], dtype='f4')
#
# df_pdb[:N].to_csv(f'{input_file}_{mode}_pdb_val.csv', index=False)
# with h5py.File(f'training_30_{mode}_v2_val.h5', 'w') as f:
# dset = f.create_dataset("seq", shape=(N, k), data=seq[:N], dtype='i1')
# dset = f.create_dataset("group_num", shape=(N, k), data=group_num[:N], dtype='i')
# dset = f.create_dataset("start_id", shape=(N, k), data=start_id[:N], dtype='i1')
# dset = f.create_dataset("res_counts", shape=(N, 3), data=res_counts[:N], dtype='i2')
# dset = f.create_dataset("coords", shape=(N, k, 3), data=coords[:N], dtype='f4')
# dset = f.create_dataset("profile", shape=(N, k, 20), data=profile[:N], dtype='f4')
def save_small():
k = 15
input_file = 'training_30'
mode = 'CA'
df_pdb = pd.read_csv(f'{input_file}_{mode}_pdb.csv')  # read the full pdb list; the small csv is written below
data = h5py.File(f'{input_file}_{mode}_v2.h5', 'r')
seq = data['seq'][()]
group_num = data['group_num'][()]
start_id = data['start_id'][()]
res_counts = data['res_counts'][()]
coords = data['coords'][()]
profile = data['profile'][()]
df_pdb[:1000].to_csv(f'{input_file}_{mode}_pdb_small.csv', index=False)
with h5py.File(f'training_30_small_{mode}.h5', 'w') as f:
dset = f.create_dataset("seq", shape=(1000, k), data=seq[:1000], dtype='i1')
dset = f.create_dataset("group_num", shape=(1000, k), data=group_num[:1000], dtype='i')
dset = f.create_dataset("start_id", shape=(1000, k), data=start_id[:1000], dtype='i1')
dset = f.create_dataset("res_counts", shape=(1000, 3), data=res_counts[:1000], dtype='i2')
dset = f.create_dataset("coords", shape=(1000, k, 3), data=coords[:1000], dtype='f4')
dset = f.create_dataset("profile", shape=(1000, k, 20), data=profile[:1000], dtype='f4')
####################################
def small_protein():
beads_list = pd.read_csv('bead_profile_match.csv')['pdb'].values
selected = ['1BPI_1_A', '2F4K_1_A', '2F21_d2f21a1', '2HBA_1_A', '2WXC_1_A', '2JOF_1_A', '1FME_1_A', '2P6J_1_A', '2A3D_1_A']
for pdb in selected:
if pdb in beads_list:
print(pdb)
pdb_bead = pdb.split('_')[0] + '_' + pdb.split('_')[2]
os.system(f'cp proteinnet_beads/{pdb_bead}_bead.csv small_protein/')
os.system(f'cp training_100_profile/{pdb}_profile.csv small_protein/')
def full_chain():
beads_list = pd.read_csv('bead_profile_match.csv')['pdb'].values
df = pd.read_csv('training_100-validation_protein_id2.csv')
idx = (df['pdb_res_count'] == df['seq_len'])
df = df[idx]
pdb_list = df['pdb_id'].values
match = np.zeros(pdb_list.shape[0], dtype=int)
for i, p in tqdm(enumerate(pdb_list)):
if p in beads_list:
match[i] = 1
idx = (match == 1)
df2 = df[idx]
df2.to_csv('protein_no_missing_residue_bead_profile_match.csv', index=False)
sample = df2.sample(n=100)
sample.to_csv('sample.csv', index=False)
for pdb in sample['pdb_id']:
print(pdb)
pdb_bead = pdb.split('_')[0] + '_' + pdb.split('_')[2]
os.system(f'cp proteinnet_beads/{pdb_bead}_bead.csv protein_sample/')
os.system(f'cp training_100_profile/{pdb}_profile.csv protein_sample/')
# idx = (df2['seq_len']<60) & (df2['profile_info']<0.6)
def reorder_local_struct():
input_file = 'training_30'
# input_file = 'validation'
# input_file = 'testing'
local_rot_dir = f'local_rot_{input_file}/'
local_rot_dir_v2 = f'local_rot_{input_file}_v2/'
if not os.path.exists(f'{local_rot_dir_v2}'):
os.system(f'mkdir -p {local_rot_dir_v2}')
def extract_one(fname):
df_list = []
df = pd.read_csv(f'{local_rot_dir}/{fname}')
if (input_file == 'validation') | (input_file == 'testing'):
df['pdb'] = fname[:-4]
for gc in df['center_num'].unique():
df_g = df[df['center_num'] == gc]
if df_g.shape[0] != 15:
print(fname)
continue
df_g = df_g.sort_values(by=['segment', 'group_num'])
group_num = df_g['group_num'].values
distance = df_g['distance'].values
# set segment id
seg = np.ones(15, dtype=int)  # np.int is deprecated/removed in recent numpy
seg[5] = 2
for i in range(6, 15):
if group_num[i] == group_num[i - 1] + 1:
seg[i] = seg[i - 1]
else:
seg[i] = seg[i - 1] + 1
# calculate mean distance of segment
seg_dist = np.zeros(15)
for i in range(5, 15):
seg_dist[i] = distance[seg == seg[i]].mean()
df_g['seg'] = seg
df_g['seg_dist'] = seg_dist
df_g = df_g.sort_values(by=['segment', 'seg_dist', 'group_num'])
df_list.append(df_g)
df = pd.concat(df_list, ignore_index=True)
df.to_csv(f'{local_rot_dir_v2}/{fname}', index=False, float_format='%.4f')
def extract_batch(batch):
for fname in tqdm(batch):
extract_one(fname)
flist = pd.read_csv(f'{local_rot_dir}/flist.txt')['fname'].values
count = len(flist)
np.random.shuffle(flist)
num_cores = 40
batch_size = count // num_cores + 1
batch_list = []
for i in range(0, count, batch_size):
batch = flist[i:i+batch_size]
batch_list += [batch]
with mp.Pool(processes=num_cores) as pool:
pool.map(extract_batch, batch_list)
def test_reorder_one():
local_rot_dir = f'local_rot_training_30/'
fname = '12AS_1_A.csv'
df_list = []
df = pd.read_csv(f'{local_rot_dir}/{fname}')
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import collections
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import Helper
import json
import os
import holoviews as hv
from holoviews import opts, dim
# hv.extension('bokeh')
hv.extension('matplotlib')
hv.output(fig='svg', size=200)
#hv.output(fig='png', size=200)
def set_user_id(df, threshold):
# create the user_id col, from the diff of time and the optimal threshold
# and the previous zone if zone_level is higher than 0
splitted_level = Helper.zone_level.split('.')
if len(splitted_level) > 1 and int(splitted_level[0]) > 0:
# lvl > 0
#df["user_id"] = ((df['diff'] > threshold) | (df['hashed_mac'] != df['hashed_mac'].shift()) | (df['father_zone'] != df['father_zone'].shift())).cumsum()
#df["user_id"] = ((df['diff']>threshold) | (df['hashed_mac'] != df['hashed_mac'].shift()) | ( (df['father_zone'] != df['father_zone'].shift()) & ( ~df['father_zone'].isin(Helper.active_father_zones) | ~df['father_zone'].shift().isin(Helper.active_father_zones)) )).cumsum()
df["user_id"] = ((df['diff']>threshold) | (df['hashed_mac'] != df['hashed_mac'].shift()) | ( (df['father_zone'] != df['father_zone'].shift()) & ( (df['father_zone'] != Helper.active_father_zone) | (df['father_zone'].shift() != Helper.active_father_zone) ) )).cumsum()
else:
# lvl 0
df["user_id"] = ((df['diff'] > threshold) | (df['hashed_mac'] != df['hashed_mac'].shift())).cumsum()
# make the user_id as index
df.index = df.user_id
return df
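# Illustrative sketch (toy values; assumes a level-0 zone configuration so only
# 'diff' and 'hashed_mac' drive the split): a gap larger than the threshold or a
# change of MAC starts a new visit id.
#
#   demo = pd.DataFrame({'hashed_mac': ['a', 'a', 'a', 'b'],
#                        'diff': [0, 10, 600, 5]})
#   set_user_id(demo, threshold=60)['user_id'].tolist()
#   # -> [1, 1, 2, 3]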
def clean_df(df):
# delete from the df the samples with only 1 conection
aux = df["user_id"].value_counts()
df = df[df["user_id"].isin(aux.index[aux.gt(1)])]
# drop unnecesary cols
df = df.drop(columns=['hashed_mac', 'diff', 'user_id'])
splitted_level = Helper.zone_level.split('.')
if len(splitted_level) > 1 and int(splitted_level[0]) > 0:
df = df.drop(columns=['father_zone'])
return df
def set_zone(df):
# read json and set zones
# remove all zones that are not of that level
zones_info = Helper.read_json_file(Helper.project_path+Helper.config_paths['GeneralDirs']['info_zones']+"zonesLevel"+str(Helper.zone_level)+".json")
# set zones_names as a global var
Helper.new_global("zones_names", list(zones_info.keys()))
df["zone"] = df["ap_name"].apply(lambda x: Helper.get_zone_name_from_dict(x, zones_info))
# remove rows with rm value in zone col (samples that are not of any zone of the actual level)
df = df[df.zone != "rm"]
return df
def times_to_percentage(vector, total_time):
for n, element in enumerate(vector):
vector[n] = element/total_time*100
return vector
def clean_repeated(df):
# the index should be the user_id
# necessary to create the aux col because the drop_duplicate
# function dont detect the index col by its name
df["aux"] = df.index
df["aux2"] = (df.zone != df.zone.shift()).cumsum()
df = df.drop_duplicates(subset=['aux', 'aux2'])
df = df.drop(columns=["aux", "aux2"])
return df
def add_in_out(df):
in_df = df.groupby("user_id").first()
in_df["zone"] = "AAA"
out_df = df.groupby("user_id").last()
out_df["zone"] = "ZZZ"
df = df.append(in_df)
df = df.append(out_df)
df = df.sort_values(by=['user_id', 'date_time', 'zone'], ascending=True)
df['zone'] = df['zone'].replace({'AAA': 'IN'})
df['zone'] = df['zone'].replace({'ZZZ': 'OUT'})
return df
def time_on_zone(df):
# create the vector for all users
global first_zone_time, last_zone_time, actual_zone_time, actual_zone
users = []
# loop through every user
for user_id, user_data in df.groupby('user_id'):
# initialize vector of % time in Buildings
zones = []
for i in range(0, len(Helper.zones_names)):
zones.append(0)
# loop through every user connection
for i, (u_id, sample) in enumerate(user_data.iterrows()):
# first connection, imporant to save the time
if i == 0:
# this is the IN row
# we dont want to do nothing in this case
# because next row will start in exactly same time than that
pass
elif i == 1:
first_zone_time = sample.date_time
# initialize actual zone values
actual_zone = sample.zone
actual_zone_time = sample.date_time
# if has changed of zone or is the last sample, add the connection time to the respective zone
else:
# dont need to check if has changed of zone or is the last zone because we know
# that the zone of row i is different than row i+1, always
# add the connection time to the respective zone (in seconds)
zones[Helper.get_zone_index(actual_zone)] += (sample.date_time - actual_zone_time) / np.timedelta64(1, 's')
# update actual zone values
actual_zone = sample.zone
actual_zone_time = sample.date_time
# save last connection time
if i == len(user_data) - 1:
last_zone_time = sample.date_time
# calculate total connection time (in seconds)
total_user_time = (last_zone_time-first_zone_time) / np.timedelta64(1, 's')
# convert the times to percentatges
zones = times_to_percentage(zones, total_user_time)
# add it to the users vector
users.append(zones)
if Helper.save_jsons:
dir_json = Helper.get_route_according_validation('time_on_building')
Helper.create_dir_if_not_exists(dir_json)
with open(dir_json+"time_on_zone"+Helper.actual_day+".json", 'w') as fp:
json.dump(users, fp, indent=3)
return users
def get_times_on_zone(df, threshold):
df = set_user_id(df, threshold)
df = set_zone(df)
df = clean_df(df)
df = add_in_out(df)
df = clean_repeated(df)
users_vector = time_on_zone(df)
if Helper.save_csvs:
# save csv before apply clustering
dir_df = Helper.get_route_according_validation('df_before_clustering')
Helper.create_dir_if_not_exists(dir_df)
df.to_csv(dir_df+Helper.actual_day+".csv")
return df, users_vector
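# Pipeline sketch (hedged; assumes the Helper globals such as zone_level,
# zones_names and actual_day are configured elsewhere): raw AP logs go in, one
# percentage-of-time-per-zone vector per visit comes out, ready for clustering:
#
#   df_visits, vectors = get_times_on_zone(raw_df, threshold)
#   apply_kmeans(df_visits, vectors, n_clusters=4)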
def apply_kmeans(df, vectors, n_clusters):
# https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
n_clusters = int(n_clusters)
# define the model
model = KMeans(random_state=6969, n_clusters=n_clusters)
# fit the model
model.fit(vectors)
# assign a cluster to each sample
users_groups = model.predict(vectors)
# add cluster results to df
# create an auxiliar df with the clusters of each user
aux_data = {"user_id": df.index.unique(),"kmeans_cluster": users_groups}
aux_df = pd.DataFrame(aux_data)
#!/usr/bin/env python
# coding: utf-8
# # Generate local files with the received delta translated excel files
# Read file in added data order.
# In[1]:
import pandas as pd
import openpyxl
import json
import os
import re
import glob
import argparse
# In[2]:
def load_json_as_df(json_data):
out_df = pd.DataFrame(list(json_data.items()),
columns=['Key', 'value'])
return out_df
# In[3]:
def read_json(json_file_path):
with open(json_file_path) as f:
data = json.load(f)
return data
# In[4]:
def reformat_json(json_obj):
json_dict = {}
for key, value in json_obj:
json_dict[key] = value
return json_dict
# In[5]:
def set_values(df_row):
try:
if pd.notnull(df_row[lang]) and len(str(df_row[lang]).strip()) != 0:
df_row['value'] = df_row[lang]
except:
print(df_row[lang])
return df_row
# In[6]:
def set_variables(df_row):
for value in allowed_values:
try:
if pd.notna(df_row[value]):
df_row[lang] = df_row[lang].replace('<'+ value + '>', df_row[value])
df_row['English value'] = df_row['English value'].replace('<'+ value + '>', df_row[value])
except:
pass
try:
if pd.notna(df_row['a-tag-replacement']):
start_index = df_row[lang].find('<a')+2
end_index = df_row[lang].find('>')
df_row[lang] = df_row[lang][:start_index] + df_row['a-tag-replacement'] + df_row[lang][end_index:]
df_row['English value'] = df_row['English value'][:start_index] + df_row['a-tag-replacement'] + df_row['English value'][end_index:]
except:
pass
return df_row
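# Illustrative sketch (values are made up; assumes 'price' appears in
# allowed_values and that the module-level lang variable is set): a '<price>'
# placeholder in both the translated and English strings is replaced by the
# row's 'price' column:
#
#   row = pd.Series({lang: 'Costs <price>', 'English value': 'Costs <price>',
#                    'price': '5 EUR'})
#   set_variables(row)[lang]
#   # -> 'Costs 5 EUR'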
# In[7]:
def write_df_to_json(df, output_json_path):
jsonFile = df.to_json(orient='values')
json_string = json.loads(jsonFile)
reformatted_json = reformat_json(json_string)
with open(output_json_path, 'w') as f:
f.write(json.dumps(reformatted_json, indent = 4, ensure_ascii=False))
# In[8]:
def get_matched_count(excel_df, merged_df):
count = 0
for key in excel_df['Key']:
for k_key in merged_df['Key']:
if key == k_key:
count+=1
break
return count
# In[9]:
def read_excel_as_df(file, language_name):
excel = pd.ExcelFile(file)
for sheet_name in excel.sheet_names:
sheet = excel.parse(sheet_name = sheet_name, header=0)
if(len(sheet.columns) == 0):
continue
return sheet
return pd.DataFrame([], columns=[english_col, language_name])
# In[10]:
def clean_json_df(df):
out_df = df.copy()
out_df_dropped = out_df.drop_duplicates(subset=['Key'], keep='first')
return out_df
# In[11]:
def clean_read_excel_df(df, language_name):
FORMAT = [english_col,language_name]
for value in allowed_values:
if value in df.columns:
FORMAT.append(value)
filtered_sheet = df[FORMAT]
sheet_no_na = filtered_sheet.dropna(subset = [english_col], inplace=False)
sheet_new = sheet_no_na.rename(columns = {english_col: 'English value'}, inplace=False)
return sheet_new
# In[12]:
def clean_excel_df(df, language_name):
excel_df = df.copy()
try:
# strip surrounding whitespace from the translated column; assign back to the
# frame (modifying rows yielded by iterrows() would not persist the change)
excel_df[language_name] = excel_df[language_name].apply(
lambda v: str(v).strip() if pd.notna(v) else v)
except:
pass
excel_df = excel_df.drop_duplicates(subset=['English value'], keep='last')
return excel_df
# In[13]:
def read_excels_as_df(translation_excel_files, language_code, language_name):
excel_df = pd.DataFrame([], columns=[english_col, language_name])
for excel_file_name in translation_excel_files:
excel = pd.ExcelFile(excel_file_name)
for sheet_name in excel.sheet_names:
sheet = excel.parse(sheet_name = sheet_name, header=0)
if(len(sheet.columns) == 0):
continue
excel_df = pd.concat([excel_df, sheet], axis=0)
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from catboost import Pool
from catboost import CatBoostClassifier
import scipy.io.wavfile as wavfile
import python_speech_features.base as speech
class pohui:
def __init__(self):
if not os.path.exists('data/ours.csv'):
raise Exception("No base data to train on (data/ours.csv missing)")
if not os.path.exists('data/random.csv'):
raise Exception("No base data to train on (data/random.csv missing)")
self.ourdata = pd.read_csv('data/ours.csv')
self.ourdata = self.ourdata.sample(frac=1)
randoms = | pd.read_csv('data/random.csv') | pandas.read_csv |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import os, sys
from scipy.spatial import distance
from scipy.cluster import hierarchy
from scipy import stats
import matplotlib.patches as mpatches
import io
import base64
import random
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn import metrics, cluster
from sklearn import neighbors
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
import umap, numba
#sns.set_style("white")
metadata_in_dropdown = ['experiment', 'strain', 'cluster']
muted = {name: 'rgba(' + str(a) + ', ' + str(b) + ', ' + str(c) + ')' for name, (a, b, c) \
in zip(['blue', 'green', 'red', 'purple', 'yellow', 'cyan'], sns.color_palette("muted"))}
def cluster_heatmap(data_df, feature_name_dict, categorical_df, categories, k_cluster=None,
method='ward', metric='euclidean', row_method=None, row_metric=None,
row_cluster=True, pairwise_complete_obs=True,
n_pca=None, caterogy_color_l=0.65, caterogy_color_s=0.65, color_seed=0,
figsize=(30,15), fontsize=(24, 20, 15), plot_x_labels=True, legend=True, mask=None, **kwargs):
    '''Draw a heatmap with hierarchical clustering for all cells using Seaborn.'''
    assert np.all(data_df.index == categorical_df.index), 'data_df and categorical_df must share the same index'
if row_method is None:
row_method = method
if row_metric is None:
row_metric = metric
if pairwise_complete_obs and metric == 'correlation': # use pandas corr()
data_scaled = (data_df - data_df.mean()) / data_df.std(ddof=0)
dist_corr_obs = distance.squareform(1 - data_scaled.T.corr()) # pairwise complete (allow na)
dist_corr_features = distance.squareform(1 - data_scaled.corr()) # pairwise complete (allow na)
data_scaled = data_scaled.values
row_linkage = hierarchy.linkage(dist_corr_features, method=row_method, metric=row_metric)
col_linkage = hierarchy.linkage(dist_corr_obs, method=method, metric=metric)
else:
scaler = preprocessing.StandardScaler().fit(data_df)
data_scaled = scaler.transform(data_df)
data_scaled_obs = data_scaled
dist_corr_features = data_scaled.T
if not n_pca is None:
pca = PCA(n_components = None)
pca.fit(data_scaled)
data_pca = pca.transform(data_scaled)
print(pca.explained_variance_ratio_.cumsum())
data_scaled_obs = data_pca[:, :n_pca]
row_linkage = hierarchy.linkage(dist_corr_features, method=row_method, metric=row_metric)
col_linkage = hierarchy.linkage(data_scaled_obs, method=method, metric=metric)
scaled_df = pd.DataFrame(data_scaled,
columns=[feature_name_dict.get(x, x) for x in data_df.columns],
index=data_df.index).T
categorical_all = []
hclust_labels = None
for feature in categories:
if feature == 'cluster':
if k_cluster is None:
raise ValueError('k_cluster not provided.')
hclust_labels = hierarchy.fcluster(col_linkage, k_cluster, criterion='maxclust')
hclust_labels = pd.Series(hclust_labels, name='cluster', index=data_df.index)
categorical_all.append(hclust_labels)
else:
categorical_all.append(categorical_df[feature])
if isinstance(caterogy_color_l, float):
caterogy_color_l = [caterogy_color_l] * len(categorical_all)
if isinstance(caterogy_color_s, float):
caterogy_color_s = [caterogy_color_s] * len(categorical_all)
# this may break if the inputs are not float or lists of the correct length
cat, luts = list(zip(*[categorical_color_mapping(x, l=l, s=s, seed=color_seed)
for x, l, s in zip(categorical_all, caterogy_color_l, caterogy_color_s)]))
g = sns.clustermap(scaled_df, center=0, row_linkage=row_linkage, col_linkage=col_linkage,
row_cluster=row_cluster, mask=mask,
col_colors = pd.DataFrame(list(cat)).T, figsize=figsize, cmap='RdBu_r', **kwargs)
_ = plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0, fontsize=fontsize[0])
_ = plt.setp(g.ax_col_colors.get_yticklabels(), rotation=0, fontsize=fontsize[1])
g.ax_heatmap.set_xlabel("")
if plot_x_labels:
_ = plt.setp(g.ax_heatmap.get_xticklabels(), rotation=90, fontsize = fontsize[2])
last_anchor = -0.25
else:
g.ax_heatmap.set_xticklabels([])
last_anchor = -0.05
if legend:
for i, (lut, legend_name) in enumerate(zip(luts, categories)):
legend_patches = []
legend_list = [(k, v) for k, v in lut.items()]
legend_list = sorted(legend_list, key=lambda x: x[0])
for k, v in legend_list:
legend_patches.append(mpatches.Patch(color=v, label=k))
avg_legend_len = np.mean([len(x) if isinstance(x, str) else 1 for x in lut.keys()])
ncol = int(figsize[0] * 3 / max(avg_legend_len, 10))
last_anchor -= 0.05 * (int((len(lut)-1)/ncol)+1+1)
if i != len(luts) - 1:
legend = g.ax_heatmap.legend(handles=legend_patches, fontsize=20, loc='lower center', bbox_to_anchor=[0.5, last_anchor], ncol=ncol, title=legend_name.title())
legend.get_title().set_fontsize(24)
legend = g.ax_heatmap.add_artist(legend)
else:
legend = g.ax_heatmap.legend(handles=legend_patches, fontsize=20, loc='lower center', bbox_to_anchor=[0.5, last_anchor], ncol=ncol, title=legend_name.title())
legend.get_title().set_fontsize(24)
return g, hclust_labels
def run_pca(data_df):
scaler = preprocessing.StandardScaler().fit(data_df)
data_scaled = scaler.transform(data_df)
pca = PCA(n_components = None)
pca.fit(data_scaled)
data_pca = pca.transform(data_scaled)
return pca, data_pca, data_scaled
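# Illustrative usage sketch (random demo data): the cumulative explained-variance ratios
# indicate how many principal components are worth keeping before clustering.
def _example_run_pca():
    demo = pd.DataFrame(np.random.rand(50, 4), columns=list("abcd"))
    pca, data_pca, data_scaled = run_pca(demo)
    print(pca.explained_variance_ratio_.cumsum())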
def categorical_color_mapping(data, l=0.7, s=0.7, seed=0):
categories = np.unique(data)
colors = sns.hls_palette(len(categories), l=l, s=s)
random.seed(seed)
colors = random.sample(colors, len(colors), )
lut = dict(zip(categories, colors))
cat_color_mapping = data.map(lut)
return cat_color_mapping, lut
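# Illustrative usage sketch (hypothetical strain labels): `lut` is the category -> RGB
# lookup used for the clustermap column annotations; `colors` is aligned with the input.
def _example_categorical_color_mapping():
    strains = pd.Series(["wt", "mut", "wt"], name="strain")
    colors, lut = categorical_color_mapping(strains)
    print(lut)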
def byte_encode_img(fig):
'''save the figure into memory buffer and byte encode it for html.'''
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
encoded_heatmap = base64.b64encode(buf.getvalue())
buf.close()
# use decoded png on html
decoded_heatmap = 'data:image/png;base64,{}'.format(encoded_heatmap.decode())
return decoded_heatmap
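# Illustrative usage sketch: the returned string drops straight into an <img> tag, which is
# how a figure can be embedded in a standalone HTML report without writing a file to disk.
def _example_byte_encode_img():
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    html_img = '<img src="{}"/>'.format(byte_encode_img(fig))
    plt.close(fig)
    return html_img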
def silhouette_plot(X_data, k_range=[5], method='ward', metric='euclidean', pairwise_complete_obs=True, n_pca=None):
if metric == 'precomputed':
dist = distance.squareform(X_data)
else:
if pairwise_complete_obs and metric == 'correlation': # use pandas corr()
data_scaled = (X_data - X_data.mean()) / X_data.std(ddof=0)
dist = distance.squareform(1 - data_scaled.T.corr()) # pairwise complete (allow na)
else:
scaler = preprocessing.StandardScaler().fit(X_data)
data_scaled = scaler.transform(X_data)
if not method == 'kmeans':
if metric == 'precomputed' or (pairwise_complete_obs and metric == 'correlation'):
linkage = hierarchy.linkage(dist, method=method, metric=metric)
else:
data_scaled_obs = data_scaled
if not n_pca is None:
pca = PCA(n_components = None)
pca.fit(data_scaled)
data_pca = pca.transform(data_scaled)
print(pca.explained_variance_ratio_.cumsum())
data_scaled_obs = data_pca[:, :n_pca]
linkage = hierarchy.linkage(data_scaled_obs, method=method, metric=metric)
fig, axes = plt.subplots(1, len(k_range), figsize=(5*len(k_range),5))
for i, n_clusters in enumerate(k_range):
ax = axes[i]
if method == 'kmeans':
clusterer = cluster.KMeans(n_clusters=n_clusters, random_state=0)
cluster_labels = clusterer.fit_predict(data_scaled)
else:
cluster_labels = hierarchy.fcluster(linkage, n_clusters, criterion='maxclust')
cluster_labels = | pd.Series(cluster_labels, name='cluster', index=X_data.index) | pandas.Series |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
        obs = filter_table(table, taxonomy, include='cc@peanut@ee',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
        obs = filter_table(table, taxonomy, exclude='ww@peanut@ee',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, include='cc,ee')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='dd')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='peanut!')
def test_filter_table_include_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='bb', mode='exact')
def test_filter_table_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='ab')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, exclude='xx')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='dd')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa; bb')
def test_filter_table_exclude_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='peanut!',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_table_include_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa', exclude='peanut!')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_table(table, taxonomy, include='cc', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at inclusion step
obs = filter_table(table, taxonomy, include='ee', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features - all dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='aa',
exclude='bb',
mode='exact')
# keep no features - one dropped at inclusion, one dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='cc',
exclude='cc',
mode='exact')
# keep no features - all dropped at inclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='peanut',
exclude='bb',
mode='exact')
def test_filter_table_underscores_escaped(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep feat1 only - underscore not treated as a wild card
obs = filter_table(table, taxonomy, include='cc,d_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - underscore in query matches underscore in
# taxonomy annotation
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; c_', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
obs = filter_table(table, taxonomy, include='c_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_all_features_with_frequency_greater_than_zero_get_filtered(self):
table = pd.DataFrame([[2.0, 0.0], [1.0, 0.0], [9.0, 0.0], [1.0, 0.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# empty - feat2, which is matched by the include term, has a frequency
# of zero in all samples, so all samples end up dropped from the table
with self.assertRaisesRegex(ValueError,
expected_regex='greater than zero'):
filter_table(table, taxonomy, include='dd')
def test_extra_taxon_ignored(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee', 'aa; bb; cc'],
index=pd.Index(['feat1', 'feat2', 'feat3'],
name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
def test_missing_taxon_errors(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc'],
index=pd.Index(['feat1'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, expected_regex='All.*feat2'):
filter_table(table, taxonomy, include='bb')
class FilterSeqs(unittest.TestCase):
def test_filter_no_filters(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_seqs(seqs, taxonomy)
def test_alt_delimiter(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
        obs = filter_seqs(seqs, taxonomy, include='cc@peanut@ee',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# exclude with delimiter
        obs = filter_seqs(seqs, taxonomy, exclude='ww@peanut@ee',
query_delimiter='@peanut@')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
def test_filter_seqs_unknown_mode(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_seqs(seqs, taxonomy, include='bb', mode='not-a-mode')
def test_filter_seqs_include(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='bb')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='cc,ee')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, include='cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, include='dd')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, include='aa; bb; dd ee')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, include='peanut!')
def test_filter_seqs_include_exact_match(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, include='bb', mode='exact')
def test_filter_seqs_exclude(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, exclude='ab')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='xx')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, exclude='dd')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='dd ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; dd ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, exclude='cc')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; cc')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb')
def test_filter_seqs_exclude_exact_match(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, exclude='peanut!',
mode='exact')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
obs = filter_seqs(seqs, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_seqs_include_exclude(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_seqs(seqs, taxonomy, include='aa', exclude='peanut!')
exp = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_seqs(seqs, taxonomy, include='aa', exclude='ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_seqs(seqs, taxonomy, include='cc', exclude='ee')
exp = pd.Series(['ACGT'], index=['feat1'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only - feat1 dropped at exclusion step
obs = filter_seqs(seqs, taxonomy, include='aa', exclude='cc')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep feat2 only - feat1 dropped at inclusion step
obs = filter_seqs(seqs, taxonomy, include='ee', exclude='cc')
exp = pd.Series(['ACCC'], index=['feat2'])
obs.sort_values(inplace=True)
exp.sort_values(inplace=True)
pdt.assert_series_equal(obs, exp)
# keep no features - all dropped at exclusion
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy,
include='aa',
exclude='bb',
mode='exact')
# keep no features - one dropped at inclusion, one dropped at exclusion
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy,
include='cc',
exclude='cc',
mode='exact')
# keep no features - all dropped at inclusion
with self.assertRaisesRegex(ValueError,
expected_regex='empty collection'):
obs = filter_seqs(seqs, taxonomy,
include='peanut',
exclude='bb',
mode='exact')
def test_filter_seqs_underscores_escaped(self):
seqs = pd.Series(['ACGT', 'ACCC'], index=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index= | pd.Index(['feat1', 'feat2'], name='id') | pandas.Index |
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import seaborn as sns
import gc
import calendar
import pickle
import os
from sklearn.preprocessing import StandardScaler
from os.path import join
from sklearn.metrics import confusion_matrix
from IPython.display import clear_output, Image, display, HTML
from datetime import datetime
plt.style.use('fivethirtyeight')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.options.display.float_format = '{:.4f}'.format
def missing_data(data):
total = data.isnull().sum()
percent = (data.isnull().sum()/data.isnull().count()*100)
tt = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
types = []
for col in data.columns:
dtype = str(data[col].dtype)
types.append(dtype)
tt['Types'] = types
return(np.transpose(tt))
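# Illustrative usage sketch (tiny demo frame): the summary has one column per input column
# and rows Total / Percent / Types describing the missing values and dtypes.
def _example_missing_data():
    demo = pd.DataFrame({"a": [1, None, 3], "b": ["x", "y", None]})
    display(missing_data(demo))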
#######################################################################
def read_Df(Path,name="data",format='pickle'):
reader=eval("pd.read_"+format)
data=reader(Path)
print(name)
    data_characterization(data)
return data
#########################################################################
def data_characterization(data):
print("shape of data : "+str(data.shape))
data_characterization=pd.DataFrame()
columns=data.columns
Type=[]
Count=[]
unique_values=[]
Max=[]
Min=[]
Mean=[]
Nan_counts=data.isnull().sum().tolist()
Nan_ratio=(data.isnull().sum()/len(data)).values
Type=data.dtypes.tolist()
J=0
for i in columns :
unique=list(data[i].unique())
unique_values.append(unique)
Count.append(len(unique))
if (data[i].dtypes.name == 'object') :
Max.append(0)
Min.append(0)
Mean.append(0)
elif ( (data[i].dtypes == '<M8[ns]') ) :
Max.append(0)
Min.append(0)
Mean.append(0)
elif ( (data[i].dtype.name=="category") ) :
Max.append(0)
Min.append(0)
Mean.append(0)
else :
Max.append(data[i].max())
Min.append(data[i].min())
Mean.append(data[i].mean())
data_characterization["Columns name"]=columns
data_characterization["Type "]=data.dtypes.tolist()
data_characterization["Count unique values"]=Count
data_characterization["Count Nan values"]=Nan_counts
data_characterization["Ratio Nan values"]=Nan_ratio
data_characterization["Unique values"]=unique_values
data_characterization["Max"]=Max
data_characterization["Min"]=Min
data_characterization["Mean"]=Mean
display(data_characterization)
return None
#########################################################################
def Label_encoder(data):
data_new=data.copy()
categoria_features=data_new.columns[data.dtypes == 'object']
labels={}
for col in categoria_features :
fact=data_new[col].factorize()
data_new[col]= fact[0]
labels[col]=fact[1]
return data_new ,labels
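# Illustrative usage sketch (hypothetical data): object columns are factorized to integer
# codes, and `labels` keeps the original categories so the encoding can be reversed.
def _example_label_encoder():
    demo = pd.DataFrame({"city": ["paris", "rome", "paris"], "n": [1, 2, 3]})
    encoded, labels = Label_encoder(demo)
    print(encoded["city"].tolist(), list(labels["city"]))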
#################################################################################
def visualisation_data(data,labels):
for i in data.columns :
data[i].plot.hist(bins=60)
plt.title(i)
if i in labels.keys():
plt.xticks(np.arange(len(labels[i])), labels[i].tolist(), rotation=90)
plt.show()
###################################################################################
def correlation_matrix_color_bar(df):
fig = plt.figure()
fig.set_size_inches(18.5, 10.5)
ax1 = fig.add_subplot(111)
    cmap = plt.cm.get_cmap('jet', 30)  # matplotlib.cm is not imported in this module, so go through plt.cm
cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
ax1.grid(True)
plt.title('Abalone Feature Correlation')
labels=df.columns.tolist()
ax1.set_xticklabels(labels,fontsize=20)
ax1.set_yticklabels(labels,fontsize=20)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
plt.show()
def correlation_matrix_pandas(data):
def magnify():
return [dict(selector="th",
props=[("font-size", "9pt")]),
dict(selector="td",
props=[('padding', "0em 0em")]),
dict(selector="th:hover",
props=[("font-size", "12pt")]),
dict(selector="tr:hover td:hover",
props=[('max-width', '200px'),
('font-size', '12pt')])
]
corr=data.corr()
cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)
return corr.style.background_gradient(cmap, axis=1)\
.set_properties(**{'max-width': '80px', 'font-size': '10pt'})\
.set_caption("Hover to magify")\
.set_precision(2)\
.set_table_styles(magnify())
###########################################################
def xl_date_to_simple_date(excel_date):
dt = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + excel_date - 2)
# tt = dt.timetuple()
return int(dt.strftime('%Y%m%d'))
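# Illustrative check: Excel serial day 43466 is 2019-01-01, so the helper should return
# the compact integer form 20190101.
def _example_xl_date_to_simple_date():
    assert xl_date_to_simple_date(43466) == 20190101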
###################################################################
def get_column_ratio(data,column):
    nbrs_unique_values = data[column].value_counts()
    for key in nbrs_unique_values.keys():
        print("ratio of " + str(key) + " : " + str(nbrs_unique_values[key] / float(len(data))))
    return
################################################################################################
# from sklearn.model_selection import train_test_split
# import xgboost as xgb
# def get_importance_features(X,Y):
# X_train, X_val, y_train, y_val = train_test_split(X, Y, test_size=0.2, random_state=1994 )
# dtrain = xgb.DMatrix(X_train, y_train,feature_names=X.columns.values)
# dval = xgb.DMatrix(X_val, y_val,feature_names=X.columns.values)
# xgb_params = {
# 'eta': 0.1,
# 'max_depth': 25,
# 'subsample': 0.9,
# 'colsample_bytree': 0.9,
# 'objective': 'binary:logistic',
# 'seed' : 10,
# 'shuffle': True,
# 'silent':1 ,
# 'n_jobs':-1
# }
# # watchlist = [(dtrain, 'train'), (dval, 'test')]
# model = xgb.train(xgb_params,dtrain,num_boost_round=50)
# xgb.plot_importance(model, height=0.5)
# plt.show()
# return model
########################################################################################
# def strip_consts(graph_def, max_const_size=32):
# """Strip large constant values from graph_def."""
# strip_def = tf.GraphDef()
# for n0 in graph_def.node:
# n = strip_def.node.add()
# n.MergeFrom(n0)
# if n.op == 'Const':
# tensor = n.attr['value'].tensor
# size = len(tensor.tensor_content)
# if size > max_const_size:
# tensor.tensor_content = "<stripped %d bytes>"%size
# return strip_def
# def show_graph(graph_def, max_const_size=32):
# """Visualize TensorFlow graph."""
# if hasattr(graph_def, 'as_graph_def'):
# graph_def = graph_def.as_graph_def()
# strip_def = strip_consts(graph_def, max_const_size=max_const_size)
# code = """
# <script>
# function load() {{
# document.getElementById("{id}").pbtxt = {data};
# }}
# </script>
# <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
# <div style="height:600px">
# <tf-graph-basic id="{id}"></tf-graph-basic>
# </div>
# """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
# iframe = """
# <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
# """.format(code.replace('"', '"'))
# display(HTML(iframe))
# def factorize_features(catgo_features,list_data):
# for c in catgo_features:
# raw_vals = np.unique(list_data[0][c])
# val_map = {}
# for i in range(len(raw_vals)):
# val_map[raw_vals[i]] = i
# for data in list_data :
# data[c]=data[c].map(val_map)
# return list_data
def StandardScaler_features(features_to_standar,data):
scaler = StandardScaler()
for c in features_to_standar:
data[c]=scaler.fit_transform(data[c].values.reshape((-1,1)))
return data
def StandardMax_features(features_to_standar,data):
for c in features_to_standar:
data[c]=data[c]/float(data[c].max())
return data
def week_of_month(tgtdate):
    tgtdate = tgtdate.to_pydatetime()
    days_this_month = calendar.mdays[tgtdate.month]
    for i in range(1, days_this_month):
        d = datetime(tgtdate.year, tgtdate.month, i)
        if d.day - d.weekday() > 0:
            startdate = d
            break
    # now we can use the modulo-7 approach
    return (tgtdate - startdate).days // 7 + 1
def confusion_matrix_plot(y_true,y_pred,classs) :
conf_arr=confusion_matrix(y_true,y_pred)
norm_conf = []
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j)/float(a))
norm_conf.append(tmp_arr)
fig=plt.figure(figsize = (10,7))
plt.clf()
ax = fig.add_subplot(111)
ax.set_title("Confusion Matrix")
# ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.Blues,
interpolation='nearest')
width, height = conf_arr.shape
for x in range(width):
for y in range(height):
ax.annotate(str(conf_arr[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center',
size=25)
cb = fig.colorbar(res)
alphabet = classs
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
plt.savefig('confusion_matrix.png', format='png')
plt.show()
return
def info(object, spacing=5, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [method for method in dir(object) if callable(getattr(object, method))]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print( "\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
def Save_df(data,path):
data.to_pickle(path)
print("DF was saved in :"+str(path))
import multiprocessing
def _apply_df(args):
dfs, func = args
return func(dfs)
def apply(df, func,workers):
print("is working")
pool = multiprocessing.Pool(processes=workers,maxtasksperchild=500)
result = pool.map(_apply_df, [(d, func)
for d in df ])
pool.close()
return result
def get_List_from_group(df):
L=list(df)
a=[data[1] for data in L]
return a
def multithreading(df,func ,workers=40):
print("create list of DataFrame")
L=get_List_from_group(df)
result=apply(L,func ,workers)
del L
return result
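# Hedged usage sketch (the column names below are illustrative): split a frame with
# groupby, then apply a picklable, module-level function to every group in parallel.
def _sum_group_load(group):
    return group["Load"].sum()
def _example_multithreading(data):
    return multithreading(data.groupby("Hour"), _sum_group_load, workers=4)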
# def factorize_features(catgo_features,data):
# dict_map={}
# for c in catgo_features:
# raw_vals = np.unique(data[c])
# val_map = {}
# for i in range(len(raw_vals)):
# val_map[raw_vals[i]] = i
# data[c]=data[c].map(val_map)
# dict_map[c]=val_map
# return data ,dict_map
def save_pickle(data , file_name):
with open(file_name,"wb") as fil :
pickle.dump(data,fil)
def read_pickle(file_name):
with open(file_name,"rb") as fil :
return pickle.load(fil)
def Create_year_woy_column(Data,Date_name,name=""):
Data["date"]=pd.to_datetime(Data[Date_name],format="%Y%m%d")
Data["year_woy"+name]= Data["date"].dt.year*100+ Data["date"].dt.weekofyear
del Data["date"]
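# Illustrative check (hypothetical column name): 2020-01-15 falls in ISO week 3,
# so the derived year_woy value is 202003.
def _example_create_year_woy():
    demo = pd.DataFrame({"DAY": ["20200115"]})
    Create_year_woy_column(demo, "DAY")
    assert demo["year_woy"].iloc[0] == 202003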
############################ Normalization ########################################################
def Quantile_Transformer(Data,columns=None,train_test=True,random_state=1994):
from sklearn.preprocessing import QuantileTransformer
    qt = QuantileTransformer(output_distribution="normal", random_state=random_state, subsample=len(Data[0]))
if train_test :
qt_fit=qt.fit( | pd.concat(Data,axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags cannot be combined with a compiled regex in str.replace;
# doing so raises a ValueError
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
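# str.match returns a Series of booleans (NaN for missing values) indicating
# whether each string matches the pattern, following re.match semantics;
# group extraction is handled by str.extract instead.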
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
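# Illustrative sketch (not from the original suite): with a single capture
# group the expand flag alone decides the return type, e.g.
# Series(['a3']).str.extract(r'(?P<letter>[a-z])', expand=False) -> Series named 'letter'
# Series(['a3']).str.extract(r'(?P<letter>[a-z])', expand=True) -> one-column DataFrame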
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH 7241
# str accessor methods (including extract) on an empty Series should
# return empty results of the appropriate dtype rather than raise
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['<NAME>', '<NAME>'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# "no limit" sentinel: re.split uses maxsplit=0, str.split uses -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
# Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = | Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF']) | pandas.Series |
#Library of functions called by SimpleBuildingEngine
import pandas as pd
import numpy as np
def WALLS(Btest=None):
#Building height
h_building = 2.7#[m]
h_m_building = h_building / 2
h_cl = 2.7# height of a storey
#number of walls
n_walls = 7
A_fl = 48
#WALLS CHARACTERISTICS
#Orientation
ori = pd.Series([('S'), ('W'), ('N'), ('E'), ('R'), ('F'), ('C')])
#Surface azimuth
surf_az = pd.Series([0, 90, 180, -90, 0, 0, 0])#one azimuth per wall (S, W, N, E, R, F, C); south = 0, west positive
#Slopes (90:vertical; 0:horizontal)
slope = pd.Series([90, 90, 90, 90, 0, 0, 0])
#Masks
f_low_diff = pd.Series([1, 1, 1, 1, 1, 1, 1])
f_low_dir = pd.Series([1, 1, 1, 1, 1, 1, 1])
#U VALUES
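#Thermal transmittances, presumably in W/(m2.K): heavy opaque walls,
#light opaque walls, window frames and glazing (one value per wall)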
U_hopw = pd.Series([0.5144, 0.5144, 0.5144, 0.5144, 0.3177, 0, 0])
U_lopw = pd.Series([3, 3, 3, 3, 3, 3, 3])
U_fr = pd.Series([2.4, 2.4, 2.4, 2.4, 2.4, 2.4, 2.4])
U_gl = pd.Series([3, 3, 3, 3, 3, 3, 3])
if (Btest == 195 or Btest == 395):
#SURFACES
#Heavy Opaque walls
A_hopw = pd.Series([21.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
elif (Btest == 200 or Btest == 210 or Btest == 230 or Btest == 240 or Btest == 250 or Btest == 400 or Btest == 410
or Btest == 420 or Btest == 430 or Btest == 800):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([12, 0, 0, 0, 0, 0, 0])
elif (Btest == 270 or Btest == 320 or Btest == 600 or Btest == 640 or Btest == 650 or Btest == 810 or Btest == 900
or Btest == 940 or Btest == 950 or Btest == 6001 or Btest == 9001 or Btest == 6501 or Btest == 9501):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([12, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
elif (Btest == 300 or Btest == 620 or Btest == 920):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 6, 0, 6, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Total
A_hopw_t = A_hopw.sum()
A_wd_t = A_wd.sum()
A_fr_t = A_fr.sum()
A_lopw_t = A_lopw.sum()
A_gl_t = max(0, A_wd_t - A_fr_t)
A_t = A_hopw_t + A_lopw_t + A_wd_t + A_fr_t
#CAPACITIES
if (Btest == 800 or Btest == 900 or Btest == 920 or Btest == 940 or Btest == 950 or Btest == 9001 or Btest == 9501):
C_hopw = ([145154, 145154, 145154, 145154, 18170, 112121, 0])
C_lopw = ([0, 0, 0, 0, 0, 0, 0])
else:
C_hopw = ([14534, 14534, 14534, 14534, 18170, 19620, 0])
C_lopw = ([0, 0, 0, 0, 0, 0, 0])
C_m = sum((A_lopw * C_lopw + A_hopw * C_hopw))
#Effective mass area [m^2]
    A_m = C_m ** 2 / sum((A_lopw * np.square(C_lopw) + A_hopw * np.square(C_hopw)))
return n_walls, f_low_diff, f_low_dir, ori, surf_az, slope, A_t, A_fl, A_lopw_t, A_hopw_t, A_gl_t, A_fr_t, A_lopw,\
A_hopw, A_gl, h_cl, C_m, A_m, U_hopw, U_lopw, U_fr, U_gl
def w_t_RH(p_atm=None, t=None, RH=None):
from math import exp
    #Humidity ratio as function of drybulb temperature and relative humidity
p_w_s = exp((17.438 * t / (239.78 + t)) + 6.4147)#partial pressure of saturated water vapor
p_w = RH * p_w_s
w = (p_w * 0.62198) / (p_atm - p_w)
return w
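# Hedged usage sketch (not part of the original module). Assumed units: p_atm in
# Pa, dry-bulb temperature t in degC and RH as a fraction between 0 and 1.
def _example_w_t_RH():
    # Humidity ratio of air at sea-level pressure, 20 degC and 50 % relative humidity
    return w_t_RH(p_atm=101325.0, t=20.0, RH=0.5)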
def ZENITHANG(Lat=None, Long=None, Long_st=None, n=None, h=None):
from math import pi,cos,sin,acos
from numpy import fix
#ZENITH ANGLE
#Ref: Duffie,J.A.,<NAME>. 1980. Solar engineering of thermal
#processes. 2nd Edition. <NAME> & Sons.
#OUTPUTS
# -h_sol: Solar time (in hours)
# -h_sol_per: Solar time (in hours per day)
# -phi: Latitude in radians
# -delta: Declination angle in radians
# -omega: Hour angle in radians
# -theta_z: Zenith angle in radians, i.e. angle of incidence of beam radiation on a horizontal surface
#INPUTS
# -Lat: Latitude of the location (north positive) -90<Lat<90
# -Long: Longitude of the location (west positive) 0<Long<180
# -Long_st: Longitude of the standard meridian of the time zone
# -n: day 1<n<365
# -h: hour 1<h<8760
#Angles in radians%
phi = Lat * pi / 180
#Summer time correction (Masy, 2008)
epsilon_summer = 1
#Equation of time (minutes)
B = (n - 1) * 360 / 365 * pi / 180
E = 229.2 * (0.000075 + 0.001868 * cos(B) - 0.032077 * sin(B) - 0.014615 * cos(2 * B) - 0.04089 * sin(2 * B))
#Solar time (in hours)
h_sol = h + (4 * (Long_st - Long) + E) / 60 - epsilon_summer
#Solar time (in hours per day)
h_sol_per_1 = h_sol - 24 * fix(h_sol / 24)
if h_sol_per_1 <= 1E-6:
h_sol_per = 24
else:
h_sol_per = h_sol_per_1
#Declination (angular position of the sun at solar noon, north positive)
#-23.45<delta<23.45
delta = 23.45 * sin(360 * (284 + n) / 365 * pi / 180) * pi / 180#(daily basis, Cooper in Duffie & Beckmann)
#Hour angle (morning negative, afternoon positive)
omega = (h_sol_per - 12) * 15 * pi / 180
#Zenith angle (between the vertical and the line to the sun)
theta_z = max(1E-5, acos(cos(delta) * cos(phi) * cos(omega) + sin(delta) * sin(phi)))
return phi, delta, omega, theta_z, h_sol
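# Hedged usage sketch (not part of the original module): the coordinates below
# (latitude 50.6, longitude 5.6, standard meridian 0) are illustrative assumptions.
def _example_ZENITHANG(h=4000):
    n = int(h // 24) + 1  # day of the year corresponding to hour h
    phi, delta, omega, theta_z, h_sol = ZENITHANG(Lat=50.6, Long=5.6, Long_st=0, n=n, h=h)
    return theta_z  # zenith angle in radians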
def CSITH(Lat=None, Long=None, Long_st=None, n=None, h=None):
from math import cos,exp
#Clear sky solar radiation
#OUTPUTS
# -I_th_cs: Clear sky theoretical solar radiation (in W/m2)
#INPUTS
# -Lat: Latitude of the location (north positive) -90<Lat<90
# -Long: Longitude of the location (west positive) 0<Long<180
# -Long_st: Longitude of the standard meridian of the time zone
# -n: day 1<n<365
# -h: hour 1<h<8760
#Main angles and solar time for location
phi, delta, omega, theta_z, h_sol = ZENITHANG(Lat, Long, Long_st, n, h)
#Extraterrestrial radiation
G_sc = 1353#W/m2 - Solar constant
I_on = G_sc * (1 + 0.033 * cos(360 * (h_sol / 24) / 365))#Normal extraterrestrial radiation
#Atmospheric transmittance for beam radiation (altitude = 0m)
tau_b = 0.12814 + 0.7568875 * exp(-0.387225 / (cos(theta_z)))
#Clear sky beam normal radiation
I_cnb = I_on * tau_b
#Clear sky horizontal beam radiation
I_cb = I_cnb * cos(theta_z)
#Atmospheric transmittance for diffuse radiation (altitude = 0m)
tau_d = 0.271 - 0.294 * tau_b
#Clear sky horizontal diffuse radiation
I_cd = I_on * tau_d * cos(theta_z)
#Total horizontal clear sky radiation
I_th_cs = max(0, (I_cb + I_cd))
#Simplified calculation (G.Masy)
I_th_cs2 = max(0, (0.7 * I_on * cos(theta_z)))
return I_th_cs,I_th_cs2
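# Hedged usage sketch (not part of the original module), reusing the assumed
# location of the ZENITHANG example above.
def _example_CSITH(h=4000):
    n = int(h // 24) + 1
    I_th_cs, I_th_cs2 = CSITH(Lat=50.6, Long=5.6, Long_st=0, n=n, h=h)
    return I_th_cs, I_th_cs2  # clear-sky horizontal radiation in W/m2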
def Btest_cases(Btest=None, h=None):
if (Btest == 195 or Btest == 200):
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 0
#infiltrations
ACH_inf = 0
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
epsilon_ir_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
epsilon_ir_gl = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Exterior natural lighting intensity for control of shadings
IAC_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Indoor solar Attenuation Coefficient (fraction of SHGC with solar shadings)
f_c_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Convective fraction of solar gains with solar shadings
#Ventilation
V_dot_vent = 0
elif Btest == 210 or Btest == 220:
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 0
#infiltrations
ACH_inf = 0
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
epsilon_ir_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
epsilon_ir_gl = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Exterior natural lighting intensity for control of shadings
IAC_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Indoor solar Attenuation Coefficient (fraction of SHGC with solar shadings)
f_c_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Convective fraction of solar gains with solar shadings
#Ventilation
V_dot_vent = 0
elif Btest == 230:
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 0
#infiltrations
ACH_inf = 1
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
epsilon_ir_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
epsilon_ir_gl = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Exterior natural lighting intensity for control of shadings
IAC_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Indoor solar Attenuation Coefficient (fraction of SHGC with solar shadings)
f_c_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Convective fraction of solar gains with solar shadings
#Ventilation
V_dot_vent = 0
elif Btest == 240:
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 200
#infiltrations
ACH_inf = 0
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
epsilon_ir_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
epsilon_ir_gl = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
#Solar absorbance
        alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#!/usr/bin/env python
# coding: utf-8
# # IRM Analysis
# This notebook will compare the performance of IRM on an unseen platform's worth of gene expression to that of ERM. These results will be used for the preliminary data section for Aim 2 in my prelim proposal.
#
# For more information on what IRM and ERM are, read [Invariant Risk Minimization by Arjovsky et al.](https://arxiv.org/abs/1907.02893)
#
# The EDA code is [here](#EDA), or to skip to the analysis, go [here](#eval)
# <a id='eda'></a>
# ## Sepsis EDA
#
# To have a good measure of training performance, ideally we'll have one platform's data held out as a validation set. To see how possible that is, we'll do exploratory data analysis on the sepsis studies in the dataset.
# In[1]:
import itertools
import json
import os
import sys
from pathlib import Path
import pandas as pd
import sklearn.metrics as metrics
import sklearn.preprocessing as preprocessing
import torch
from plotnine import *
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from whistl import datasets
from whistl.datasets import CompendiumDataset
from whistl import models
from whistl import train
from whistl import utils
# In[2]:
import random
import numpy as np
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(42)
random.seed(42)
# In[3]:
curr_path = str(Path('.'))
map_file = str(Path('../../data/sample_classifications.pkl'))
sample_to_label = utils.parse_map_file(map_file)
sample_ids = sample_to_label.keys()
metadata_file = str(Path('../../data/all_metadata.json'))
metadata_json = json.load(open(metadata_file))
sample_metadata = metadata_json['samples']
sample_ids = utils.filter_invalid_samples(sample_metadata, sample_ids)
sample_to_platform = utils.map_sample_to_platform(metadata_json, sample_ids)
sample_to_study = utils.map_sample_to_study(metadata_json, sample_ids)
# In[4]:
compendium_path = str(Path('../../data/subset_compendium.tsv'))
compendium_df = datasets.load_compendium_file(compendium_path)
compendium_df.head()
# In[5]:
sepsis_samples = [sample for sample in sample_ids if sample_to_label[sample] == 'sepsis']
sepsis_platforms = [sample_to_platform[sample] for sample in sepsis_samples]
sepsis_studies = [sample_to_study[sample] for sample in sepsis_samples]
print(len(sepsis_samples))
print(len(sepsis_platforms))
print(len(sepsis_studies))
# In[6]:
sepsis_metadata_dict = {'sample': sepsis_samples, 'platform': sepsis_platforms, 'study': sepsis_studies}
sepsis_metadata_df = pd.DataFrame(sepsis_metadata_dict)
sepsis_metadata_df = sepsis_metadata_df.set_index('sample')
sepsis_metadata_df.head()
# In[7]:
sepsis_metadata_df['platform'].value_counts()
# In[8]:
sepsis_metadata_df[sepsis_metadata_df['platform'] == 'affymetrix human genome u133a array (hgu133a)']
# In[9]:
# Remove platform with only one sample to reduce downstream variance
sepsis_metadata_df = sepsis_metadata_df.drop(labels='GSM301847', axis=0)
print(len(sepsis_metadata_df.index))
# In[10]:
sepsis_metadata_df['study'].value_counts()
# <a id='eval'></a>
# ## IRM Evaluation
# ### Setup
# In[11]:
curr_path = os.path.dirname(os.path.abspath(os.path.abspath('')))
map_file = str(Path('../../data/sample_classifications.pkl'))
sample_to_label = utils.parse_map_file(map_file)
metadata_path = str(Path('../../data/all_metadata.json'))
compendium_path = str(Path('../../data/subset_compendium.tsv'))
# ### More setup
# Initialize the model and encoder for the training process
# In[12]:
classes = ['sepsis', 'healthy']
encoder = preprocessing.LabelEncoder()
encoder.fit(classes)
# ### Tune split
# We will get a rough estimate of performance with leave-one-out cross-validation. To know when to stop training, though, we will need a tuning dataset.
# In[13]:
tune_df = sepsis_metadata_df[sepsis_metadata_df['platform'] == 'affymetrix human genome u219 array (hgu219)']
train_df = sepsis_metadata_df[sepsis_metadata_df['platform'] != 'affymetrix human genome u219 array (hgu219)']
print(len(tune_df.index))
print(len(train_df.index))
tune_studies = tune_df['study'].unique()
tune_dataset = CompendiumDataset(tune_studies, classes,
sample_to_label, metadata_path,
compendium_path, encoder)
tune_loader = DataLoader(tune_dataset, batch_size=1)
# ### Filter Platforms
# Remove a platform that corresponds to a study present in the labeled data, but not the human compendium
# In[14]:
platforms = train_df['platform'].unique()
platforms = [p
for p in platforms
if p != 'affymetrix human human exon 1.0 st array (huex10st)'
]
num_seeds = 5
# ## Training
#
# The models are trained with two platforms held out.
# One platform (huex10st) is left out in all runs, and is used as a tuning set to determine which version of the model should be saved.
# The second platform (referred to going forward as the 'held-out platform') is held out during training, then the trained model's performance is evaluated by trying to predict whether each sample corresponds to sepsis or healthy expression.
# In[15]:
irm_result_list = []
erm_result_list = []
for hold_out_platform in platforms:
train_platforms = train_df[train_df['platform'] != hold_out_platform]['platform'].unique()
train_loaders = []
total_irm_samples = 0
for platform in train_platforms:
studies = train_df[train_df['platform'] == platform]['study']
train_dataset = CompendiumDataset([platform], classes, sample_to_label, metadata_path, compendium_path,
encoder, mode='platform')
total_irm_samples += len(train_dataset)
if len(train_dataset) > 0:
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
train_loaders.append(train_loader)
platform_file = hold_out_platform.split('(')[-1].strip(')')
full_train_studies = train_df[train_df['platform'] != hold_out_platform]['study'].unique()
full_train_dataset = CompendiumDataset(train_platforms, classes, sample_to_label, metadata_path,
compendium_path, encoder, mode='platform')
full_train_loader = DataLoader(full_train_dataset, batch_size=8, shuffle=True)
assert total_irm_samples == len(full_train_dataset)
for seed in range(num_seeds):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
net = models.ThreeLayerNet(len(compendium_df.index))
writer_path = Path('./logs/erm_analysis_{}_{}.tfrecord'.format(platform_file, seed))
writer = SummaryWriter(writer_path)
save_file = Path('./logs/erm_analysis_{}_{}.pkl'.format(platform_file, seed))
results = train.train_with_erm(net, full_train_loader,
tune_loader, num_epochs=400,
save_file=save_file, writer=writer)
erm_result_list.append(results)
net = models.ThreeLayerNet(len(compendium_df.index))
writer_path = Path('./logs/irm_analysis_{}_{}.tfrecord'.format(platform_file, seed))
writer = SummaryWriter(writer_path)
save_file = Path('./logs/irm_analysis_{}_{}.pkl'.format(platform_file, seed))
results = train.train_with_irm(net, train_loaders,
tune_loader, num_epochs=400,
loss_scaling_factor=1, save_file=save_file,
writer=writer, burn_in_epochs=0)
irm_result_list.append(results)
# In[16]:
def eval_model(net, loader):
all_labels = []
all_preds = []
for batch in loader:
expression, labels, ids = batch
expression = expression.float().to('cuda')
labels = labels.numpy()
all_labels.extend(labels)
output = net(expression)
preds = [1 if p > 0 else 0 for p in output]
all_preds.extend(preds)
f1 = metrics.f1_score(all_labels, all_preds)
return f1
# In[17]:
irm_f1_scores = []
erm_f1_scores = []
for hold_out_platform in platforms:
for seed in range(num_seeds):
# Load data
try:
hold_out_studies = train_df[train_df['platform'] == hold_out_platform]['study']
hold_out_dataset = CompendiumDataset(hold_out_studies, classes, sample_to_label, metadata_path, compendium_path, encoder)
hold_out_loader = DataLoader(hold_out_dataset, batch_size=1, shuffle=False)
# Load IRM model
platform_file = hold_out_platform.split('(')[-1].strip(')')
save_file = Path('./logs/irm_analysis_{}_{}.pkl'.format(platform_file, seed))
net = torch.load(save_file, 'cuda')
            # Evaluate IRM model
f1_score = eval_model(net, hold_out_loader)
irm_f1_scores.append(f1_score)
# Load ERM model
save_file = Path('./logs/erm_analysis_{}_{}.pkl'.format(platform_file, seed))
net = torch.load(save_file, 'cuda')
            # Evaluate ERM model
f1_score = eval_model(net, hold_out_loader)
erm_f1_scores.append(f1_score)
except FileNotFoundError as e:
print(e)
# In[18]:
print(irm_f1_scores)
print(erm_f1_scores)
held_out_platform_list = []
for platform in platforms:
p = [platform] * 2 * num_seeds
held_out_platform_list.extend(p)
#print(held_out_platform_list)
score_list = list(itertools.chain(*zip(irm_f1_scores, erm_f1_scores)))
print(score_list)
label_list = (['irm'] + ['erm']) * (len(score_list) // 2)
print(label_list)
# In[29]:
held_out_platform_list = [plat.split('(')[-1].strip(')') for plat in held_out_platform_list]
result_dict = {'f1_score': score_list, 'irm/erm': label_list, 'held_out_platform': held_out_platform_list}
result_df = pd.DataFrame(result_dict)
def test_get_number_rows_cols_for_fig():
from mspypeline.helpers import get_number_rows_cols_for_fig
assert get_number_rows_cols_for_fig([1, 1, 1, 1]) == (2, 2)
assert get_number_rows_cols_for_fig(4) == (2, 2)
def test_fill_dict():
from mspypeline.helpers import fill_dict
def test_default_to_regular():
from mspypeline.helpers import default_to_regular
from collections import defaultdict
d = defaultdict(int)
d["a"] += 1
assert isinstance(d, defaultdict)
d = default_to_regular(d)
assert isinstance(d, dict)
assert not isinstance(d, defaultdict)
def test_get_analysis_design():
from mspypeline.helpers import get_analysis_design
assert get_analysis_design(["A1_1", "A1_2", "A2_1", "A2_2"]) == {
'A1': {'1': 'A1_1', '2': 'A1_2'},
'A2': {'1': 'A2_1', '2': 'A2_2'}
}
assert get_analysis_design(["A_1_1"]) == {"A": {"1": {"1": "A_1_1"}}}
def test_plot_annotate_line():
from mspypeline.helpers import plot_annotate_line
def test_venn_names():
from mspypeline.helpers import venn_names
def test_install_r_dependencies():
from mspypeline.helpers.Utils import install_r_dependencies
def test_get_number_of_non_na_values():
from mspypeline.helpers import get_number_of_non_na_values as gna
assert gna(20) > gna(10) > gna(5) > gna(3)
assert gna(3) == gna(2) and gna(3) == gna(1)
def test_get_intersection_and_unique():
from mspypeline.helpers import get_intersection_and_unique
import pandas as pd
df1 = pd.DataFrame()
df2 = pd.DataFrame()
assert all(map(pd.Series.equals,
get_intersection_and_unique(df1, df2),
(pd.Series([], dtype=bool), pd.Series([], dtype=bool), pd.Series([], dtype=bool))))
df1 = pd.DataFrame([[1, 1, 1], [1, 1, 1], [0, 0, 0], [1, 0, 0]])
    df2 = pd.DataFrame([[1, 1, 1], [0, 0, 0], [1, 1, 1], [1, 0, 0]])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 14:42:02 2018
@author: rwilson
"""
import pandas as pd
import numpy as np
import scipy.linalg as linalg
import random
import os
import h5py
import matplotlib.pyplot as plt
import itertools
from numba import njit
from numba import prange
import os
import shutil
import gc
class utilities():
''' Some helper functions
'''
def src_rec_pairs(channels, exclude=None, reciprocity=False, randSample=None):
        '''Generate a list of source-receiver pairs from the given channels,
        optionally excluding selected channels.
Parameters
----------
channels : list
list of channels from which src rec pairs should be generated
exclude : list (Default = None)
list of channels which should be excluded from the list of channels
reciprocity : bool (Default = False)
Include reciprocal pairs.
randSample : int (Default = None)
Extract a random subset from the list of length ``randSample``
Returns
-------
src_rec : list
list of unique source receiver pairs
'''
if reciprocity:
src_rec = [(i, j) for i in channels for j in channels if i!=j and
i!=np.all(exclude) and
j!=np.all(exclude)]
elif not reciprocity:
src_rec = [(i, j) for i in channels for j in channels if i!=j and
i!=np.all(exclude) and
j!=np.all(exclude) and
i<j]
if randSample:
return random.sample(src_rec, randSample)
else:
return src_rec
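    # Hedged usage sketch (not part of the original class): all unique pairs from
    # four channels while excluding channel 3, thinned to two random pairs, e.g.
    #   pairs = utilities.src_rec_pairs([1, 2, 3, 4], exclude=[3], randSample=2)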
def read_channelPos(file, dimensions):
'''Read in csv containing each channel position. Currently expecting that
the channel position csv is of a specific type and needs shifting to bottom
zeroed coord. system.
Parameters
----------
file : str
Location of csv containing the channel locations
dimensions : dict
The ``height`` of the mesh.
Returns
-------
dfChan : DataFrame
Database of each channel location
'''
dfChan = pd.read_csv(file,
delim_whitespace=True, skiprows=2, usecols=[0,1,2,3,4])
dfChan.index = dfChan.index.droplevel()
dfChan.drop(inplace=True, columns=dfChan.columns[-2:].tolist())
dfChan.columns = ['x','y','z']
# Shift coords to mesh bottom zeroed
dfChan.z = dfChan.z + np.abs(dfChan.z.min()) + dimensions['height']/2 - np.abs(dfChan.z.min())
print('Channel Positions:\n', [(dfChan.iloc[i].x, dfChan.iloc[i].y, dfChan.iloc[i].z)
for i in range(dfChan.shape[0])])
print('Channel index:\n',[str(chan)
for _,chan in enumerate(dfChan.index.values)])
return dfChan
def HDF5_data_save(HDF5File, group, name, data, attrb={'attr': 0}, ReRw='w'):
        '''Save data into a hdf5 database; if the data name already exists, an
        attempt to overwrite the data will be made.
Parameters
----------
HDF5File : str
Relative location of database
group : str
The expected group name
name : str
The name of the data to be saved within group
attrb : dict
attribute dictionary to store along with the database.
ReRw : str (Default = 'w')
The read/write format
'''
toscreen = '----- Attributes added to database %s %s, table %s ----- \n' \
%(HDF5File,group, name)
with h5py.File(HDF5File, ReRw) as f:
try:
dset = f.create_dataset(os.path.join(group, name), data=data, dtype='f')
print(toscreen)
for key,item in zip(attrb.keys(), attrb.values()):
print('Key:', key,'| item:', item)
dset.attrs[key] = item
except RuntimeError:
del f[os.path.join(group, name)]
dset = f.create_dataset(os.path.join(group, name), data=data, dtype='f')
print(toscreen)
for key,item in zip(attrb.keys(), attrb.values()):
print('Key:', key,'| item:', item)
dset.attrs[key] = item
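    # Hedged usage sketch (not part of the original class); the file, group and
    # attribute names are hypothetical. ReRw='a' appends to an existing file,
    # whereas the default 'w' truncates it:
    #   utilities.HDF5_data_save('CCprocessed.h5', 'CCdata', 'lag0',
    #                            np.zeros((10, 10)), attrb={'TSamp': 1e-6}, ReRw='a')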
def HDF5_data_del(HDF5File, group, names):
'''Deletes data from a hdf5 database within some group.
Parameters
----------
HDF5File : str
Relative location of database
group : str
The expected group name
names : str
The names of the data groups to be deleted from ``group``
'''
with h5py.File(HDF5File, 'a') as f:
for name in names:
try:
path = os.path.join(group,name)
del f[path]
except KeyError:
print(name, "was not in", group)
def HDF5_data_read(HDF5File, group, name, ReRw='r'):
        '''Read data from a hdf5 database
Parameters
----------
HDF5File : str
Relative location of database
group : str
The expected group name
        name : str
            The name of the data to be read within group
        ReRw : str (Default = 'r')
The read/write format
Returns
-------
dset : ()
Data contained within group/name
'''
with h5py.File(HDF5File, ReRw) as f:
            dset = f[os.path.join(group, name)][()]
return dset
def HDF5_attri_read(HDF5File, group, name, ReRw='r'):
'''Read keys and attributes from hdf5 database.
Parameters
----------
HDF5File : str
Relative location of database
group : str
The expected group name
        name : str
            The name of the data whose attributes will be read
        ReRw : str (Default = 'r')
The read/write format
Returns
-------
dic : dict
A dictionary of all the attributes stored within the group/name.
'''
with h5py.File(HDF5File, ReRw) as f:
return {item[0]:item[1] for item in f[os.path.join(group,name)].attrs.items()}
def WindowTcent(TS, wdws):
'''Determine the centre of each correlation window in time from the input
time-series database.
Parameters
----------
TS : float
Sampling period
wdws : list(str)
Containing the windows range in sample points separated by -
'''
wdws_cent = [int(np.mean([int(wdw.split('-')[0]),
int(wdw.split('-')[1]) ])) for wdw in wdws]
wdws_cent = np.array(wdws_cent) * TS
return wdws_cent
def DiffRegress(Tseries, dfChan, Emaxt0, plotOut=False):
'''Perform linear regression to fit the 1D diffusion equation to an input
time series. The output of this function is an estimation of the
diffusivity and dissipation. (<NAME> et. al. 2001)
Parameters
----------
Tseries : array-like
The input time series
dfChan : DataFrame
Containing the channel positsion columns x, y, z
Emaxt0 : int
The index corresponding to the arrival time (onset of) maximum energy
Returns
-------
popt[1] : float
The diffusitivty determined from the least squared fit.
Units depends upon input t and z units check units
popt[2] : float
The Dissipation
'''
from scipy import optimize
# Determine absolute distance between source and receiver
recPos = dfChan.loc[Tseries['recNo']]
srcPos = dfChan.loc[Tseries['srcNo']]
absDist = np.sqrt(abs(recPos.x - srcPos.x)**2 +
abs(recPos.y - srcPos.y)**2 +
abs(recPos.z - srcPos.z)**2)
# Define the 1D diffusion equation, logE(z,t)
def diffusivity(t, z, D, sigma):
return np.log(1/(2*np.sqrt(np.pi*D))) \
- 0.5*np.log(t) - z**2/(4*D*t) - sigma*t
# The energy density
y_data = np.log(Tseries['Tseries']**2)[Emaxt0:]
# The time axis zeroed to the onset of Emaxt0
x_data = (np.arange(0, Tseries['TracePoints']) *
Tseries['TSamp'])[Emaxt0-1:]
x_data = (x_data-x_data[0])[1:]
popt, pcov = optimize.curve_fit(diffusivity,
x_data,
y_data,
p0=[absDist, 1, 1],
bounds=([absDist*0.9, 0.1, 0.1],
[absDist*1.1, np.inf, np.inf]))
if plotOut:
# Plot the resulting fit
plt.figure(figsize=(6, 4))
plt.scatter(x_data, y_data, label='Data')
plt.plot(x_data, diffusivity(x_data, popt[0], popt[1], popt[2]),
label='Fitted function', color='red')
plt.legend(loc='best')
plt.show()
return popt[1], popt[2]
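    # Hedged usage sketch (not part of the original class): DiffRegress expects a trace
    # dictionary like the one returned by utilities.traceAttributes, a channel-position
    # DataFrame and the sample index of the maximum-energy arrival (values assumed):
    #   Tseries = utilities.traceAttributes(SurveyDB, Col=0)
    #   D, sigma = utilities.DiffRegress(Tseries, dfChan, Emaxt0=500, plotOut=True)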
def src_recNo(CCdata):
        '''Extract the source receiver pairs within CCdata, excluding common pairs.
Parameters
----------
CCdata : dataframe
CCdata dataframe
Returns
-------
src_recNo : list
List of the source receiver numbers
'''
src_rec = list(sorted(set(
[(srcNo, recNo) for srcNo, recNo in
zip(CCdata.index.get_level_values('srcNo'),
CCdata.index.get_level_values('recNo')) if
srcNo != recNo]
)))
return src_rec
def traceAttributes(SurveyDB, Col):
'''Extract a single trace and its attributes from single survey dataframe,
into a dictionary.
Parameters
----------
TStrace : DataFrame
Containing all traces for a single survey.
Col : int
The column to extract from the database.
Returns
-------
traceDict: dict
Containing the trace along with all header information.
'''
traceDict = {key: SurveyDB.columns.get_level_values(key)[Col] for key in
SurveyDB.columns.names}
traceDict['Tseries'] = SurveyDB.iloc[:, Col].values
return traceDict
def d_obs_time(CCdata, src_rec, lag, window, parameter='CC', staTime=None, stopTime=None):
'''Construct the d_obs dataframe over time from the input CCdata, for a
select list of source-receiver pairs.
Parameters
----------
CCdata : dataframe
CCdata dataframe
src_rec : list(tuples)
A list of tuples for each source receiver pair.
lag : list(tuples)
The lag value from which the extraction is made.
window : str/list(str)
string of windows or list of str of windows.
parameter : str (Default='CC')
Parameter from which to extract from the dataframe
staTime : str
The start time from which ``d_obs`` is extracted
stopTime : str
The stop time before which ``d_obs`` is extracted.
Returns
-------
d_obs_time : dataframe
dataframe containing the in each row the d_obs vector for all requested
source-receiver pairs, increasing with time.
'''
if staTime and stopTime:
mask = (pd.to_datetime(CCdata.index.get_level_values('Time')) >
pd.to_datetime(staTime)) & \
(pd.to_datetime(CCdata.index.get_level_values('Time')) <
pd.to_datetime(stopTime))
CCdata = CCdata.copy().loc[mask]
elif staTime:
mask = (pd.to_datetime(CCdata.index.get_level_values('Time')) >
pd.to_datetime(staTime))
CCdata = CCdata.copy().loc[mask]
elif stopTime:
mask = (pd.to_datetime(CCdata.index.get_level_values('Time')) <
pd.to_datetime(stopTime))
CCdata = CCdata.copy().loc[mask]
# Time index for each survey based on second src_rec pair.
time_index = np.array([0])
for sr in src_rec:
index = pd.to_datetime(CCdata.loc[([sr[0]], [sr[1]]),
(lag, window[0], parameter)].
unstack(level=[0, 1]).index)
if index.shape[0]>time_index.shape[0]:
time_index = index
if len(window)>1:
temp = []
for wdw in window:
df = pd.concat([CCdata.loc[([sr[0]], [sr[1]]),
(lag, wdw, parameter)].
unstack(level=[0, 1]).
reset_index(drop=True) for
sr in src_rec], axis=1)
temp.append(df)
d_obs_time = pd.concat(temp, axis=1)
else:
d_obs_time = pd.concat([CCdata.loc[([sr[0]], [sr[1]]),
(lag, window, parameter)].
unstack(level=[0, 1]).
reset_index(drop=True) for
sr in src_rec], axis=1)
d_obs_time.index = time_index
return d_obs_time.dropna().astype(float)
def measNorange(CCdata, staTime, endTime):
'''Determines the measurement survey number between given time interval.
This function is intended to allow the user to quickly determine the measurement
        number range of interest, thereby allowing the reprocessing of the raw data
over this region only. This requires that the user passes a CCdata which
represents the entire raw dataset.
Parameters
----------
CCdata : dataframe
CCdata dataframe
staTime : str
            The start time of the interval.
        endTime : str
            The end time of the interval.
Returns
-------
None : tuple
Measurement survey numbers within the range given.
'''
mask = (pd.to_datetime(CCdata.index.get_level_values('Time').values) > pd.to_datetime(staTime)) & \
(pd.to_datetime(CCdata.index.get_level_values('Time').values) < pd.to_datetime(endTime))
measNo = [i for i, x in enumerate(mask) if x]
return (measNo[0], measNo[-1])
def surveyNorange(TSsurveys, staTime, endTime):
'''Determines the survey number between given time interval.
This function is intended to allow the user to quickly determine the survey
        number range of interest, thereby allowing the reprocessing of the raw data
over this region only. This requires that the user passes a CCdata which
represents the entire raw dataset.
Parameters
----------
TSsurveys : list
The survey folder numbers
staTime : str
            The start time of the interval.
        endTime : str
            The end time of the interval.
Returns
-------
None : tuple
Measurement survey numbers within the range given.
'''
surveyTimes = [pd.to_datetime(group.split('survey')[1]) for group in
TSsurveys]
        mask = [(group > pd.to_datetime(staTime)) &
                (group < pd.to_datetime(endTime)) for group in surveyTimes]
        surveyNo = [i for i, x in enumerate(mask) if x]
        return (surveyNo[0], surveyNo[-1])
__author__ = '<NAME>'
from sklearn.cluster import KMeans
import scipy.stats as stats
from collections import OrderedDict
import matplotlib.pyplot as plt
import math
import pandas as pd
import numpy as np
import operator
class Optimizer(object):
"""
    Head of optimizer operations.
    Provides the statistical and ML algorithms used during execution.
    Arguments:
        performance: configuration performance data used in execution
        fitted_population: population after the fitness function has run
"""
def __init__(self, performance, fitted_population):
self.performance = performance
self.fitted_population = fitted_population
def elbow(self):
"""
Find elbow point inside KMeans clustering.
To get the most effective number of clusters according to the dataset
Returns:
number of cluster in the (elbow)
"""
sum_of_squared_distances = []
for cluster in range(1, int(self.fitted_population.shape[0]) + 1):
clusters_no = KMeans(n_clusters=cluster)
clusters_no = clusters_no.fit(self.fitted_population[['Chromosome', 'Total']])
sum_of_squared_distances.append(clusters_no.inertia_)
# plt.plot(range(1, int(self.population.shape[0])), Sum_of_squared_distances, 'bx-')
# plt.xlabel('cluster number')
# plt.ylabel('Sum_of_squared_distances')
# plt.title('Elbow method for optimal number of clusters')
# plt.show()
return self.linear_group_size(sum_of_squared_distances)
def linear_group_size(self, sum_of_squared_distances):
"""
        Distance calculation for the elbow method.
        A straight line is drawn between the first and last points of the
        inertia curve, and the distance of every point to that line is computed.
        Returns:
            The index of the point with the largest distance (the elbow)
"""
slope = (sum_of_squared_distances[int(self.fitted_population.shape[0])-1] - sum_of_squared_distances[0]) / (int(self.fitted_population.shape[0]) - 1)
intercept = sum_of_squared_distances[0] - slope
distance = []
for label in range(len(sum_of_squared_distances)):
distance.append(abs((slope * label) - (sum_of_squared_distances[label]) + intercept)/(math.sqrt(slope**2 + intercept**2)))
return distance.index(max(distance))
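    # Worked illustration (not part of the original class): for inertia values
    # [100, 40, 20, 15, 12] the chord runs from the first to the last point and
    # the computed distance is largest at index 1, so linear_group_size returns 1.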
def group_population(self, save=False, data=False):
"""
        Use the KMeans clustering algorithm from the scikit-learn framework to cluster groups of parents
in order to perform parent selection later.
Returns:
population_groups: get labels and cluster centers in posterior parent selection algorithm
"""
self.fitted_population['Chromosome'] = self.fitted_population['Chromosome'] * self.performance['chromosome_weight']
population_groups = KMeans(n_clusters=self.elbow())
population_groups.fit(self.fitted_population[['Chromosome', 'Total']])
self.fitted_population = pd.concat([self.fitted_population, pd.Series(self.change_order(population_groups), name='Labels')], axis=1)
# plt.title('Fitted chromosomes groups')
# plt.xlabel('Number of chromosome')
# plt.ylabel('Total fitted value')
# plt.scatter(self.fitted_population['Chromosome'], self.fitted_population['Total'], c=population_groups.labels_)
# plt.scatter(population_groups.cluster_centers_[:,0], population_groups.cluster_centers_[:,1], marker='x')
# plt.show()
if (save and data):
plt.title('Fitted chromosomes groups')
plt.xlabel('Number of chromosome')
plt.ylabel('Total fitted value')
plt.scatter(self.fitted_population['Chromosome'], self.fitted_population['Total'], c=population_groups.labels_)
plt.scatter(population_groups.cluster_centers_[:,0], population_groups.cluster_centers_[:,1], marker='x')
# plt.show()
plt.savefig(data)
else:
return population_groups
@staticmethod
def change_order(population_groups) -> list:
"""
        By default, cluster labels are assigned arbitrarily, which would later
        distort the estimation of distances between cluster centers.
        To avoid this, the function remaps the labels into ascending order of the cluster centers.
Args:
population_groups: unordered population groups
Returns:
new_order: population groups with ascending order of notation
"""
order = {}
pivot_order = {}
new_order = []
for k,v in enumerate(population_groups.cluster_centers_):
order[int(k)] = v[1]
pivot_order = list(OrderedDict(sorted(order.items(), key=operator.itemgetter(1))).keys())
for label in list(population_groups.labels_):
new_order.append(pivot_order.index(int(label)))
return new_order
# ---------------------------------------------------------------------------------
@staticmethod
def cluster_distances(population_groups) -> list:
"""
        Measure the distance between every pair of cluster centers; the resulting list
        is later converted into selection probabilities for the roulette wheel.
        Args:
            population_groups: output generated from KMeans
Returns:
centroid_dists: list of distances between centroids measurement
"""
centroid_dists = []
centroids = list(population_groups.cluster_centers_)
centroids.sort(key=lambda x: x[1])
for destination_cluster, value in enumerate(centroids):
dists = []
for source_cluster, value in enumerate(centroids):
if (centroids[source_cluster][0] == centroids[destination_cluster][0]) and (centroids[source_cluster][1] == centroids[destination_cluster][1]):
pass
else:
dist = math.sqrt((centroids[source_cluster][0] - centroids[destination_cluster][0])**2 + (centroids[source_cluster][1] - centroids[destination_cluster][1])**2)
dists.append(dist)
centroid_dists.append(dists)
return centroid_dists
def roulette_wheel_selection(self, population_groups) -> dict:
"""
        Convert cluster distances into probabilities of cluster selection when drawing the second parent.
        Args:
            population_groups: output generated from KMeans
Returns:
probability: dict of each cluster probabilities to draw second parent in child creation
"""
centroid_dists = self.cluster_distances(population_groups)
max_prob = [sum(dist) for dist in centroid_dists]
cluster = 0
probability = {}
for centroid in centroid_dists:
dist = []
for target in centroid:
dist.append(target / max_prob[cluster])
dist.insert(cluster, 0)
probability[cluster] = dist
cluster += 1
return probability
def next_generation(self, population_groups, population):
"""
        Develop the next generation of the population from the fitted population
Args:
population_groups: ouput generated from KMeans
population: clean population dataset without add ons
Returns:
population: new population with inserted childrens and worse parent removed
"""
probability = self.roulette_wheel_selection(population_groups)
self.fitted_population['Chromosome'] = self.fitted_population['Chromosome'] / self.performance['chromosome_weight']
self.fitted_population['Selected'] = pd.Series(data=[False for row in range(self.fitted_population.shape[0])])
# print(self.fitted_population)
try:
similarity = 0
while ((self.fitted_population['Selected'] == True).sum() / self.fitted_population.shape[0]) < self.performance['shuffle_scale']:
first_parent = None
second_parent = None
while True:
first_parent = self.choose_first_parent()
second_parent = self.choose_second_parent(probability, first_parent)
if int(first_parent.isnull().sum(axis = 1)) == int(second_parent.isnull().sum(axis=1)):
break
F_test = stats.f_oneway(first_parent.iloc[:, 0: self.fitted_population.columns.get_loc('Total')].values[0], second_parent.iloc[:, 0: self.fitted_population.columns.get_loc('Total')].values[0])
worse_parent_total = min(float(first_parent['Total']), float(second_parent['Total']))
better_parent_total = max(float(first_parent['Total']), float(second_parent['Total']))
parent_survived = first_parent if int(first_parent['Labels']) > int(second_parent['Labels']) else second_parent
child = self.child(first_parent, second_parent)
child_F_test = stats.f_oneway(child.iloc[:, 0: self.fitted_population.columns.get_loc('Total')].values[0], parent_survived.iloc[:, 0: self.fitted_population.columns.get_loc('Total')].values[0])
# print(self.fitted_population)
if (child_F_test[0] != 0) and (F_test[0] > child_F_test[0]) and ((better_parent_total + worse_parent_total) < (better_parent_total + float(child['Total'])) * self.performance['variety']):
                    child = pd.concat([child,
                                       pd.Series(int(second_parent['Chromosome']) if int(first_parent['Labels']) > int(second_parent['Labels']) else int(first_parent['Chromosome']), name='Chromosome'),
                                       pd.Series(min(int(first_parent['Labels']), int(second_parent['Labels'])), name='Labels'),
                                       pd.Series(True, name='Selected')], axis=1)
import ast
import importlib
import re
import numpy as np
import pandas as pd
import woodwork as ww
def import_or_none(library):
'''
Attemps to import the requested library.
Args:
library (str): the name of the library
Returns: the library if it is installed, else None
'''
try:
return importlib.import_module(library)
except ImportError:
return None
def camel_to_snake(s):
s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower()
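# Example (illustrative, not part of the original module):
#   camel_to_snake('PostalCode') -> 'postal_code'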
def _convert_input_to_set(semantic_tags, error_language='semantic_tags'):
"""Takes input as a single string, a list of strings, or a set of strings
and returns a set with the supplied values. If no values are supplied,
an empty set will be returned."""
if not semantic_tags:
return set()
if type(semantic_tags) not in [list, set, str]:
raise TypeError(f"{error_language} must be a string, set or list")
if isinstance(semantic_tags, str):
return {semantic_tags}
if isinstance(semantic_tags, list):
semantic_tags = set(semantic_tags)
if not all([isinstance(tag, str) for tag in semantic_tags]):
raise TypeError(f"{error_language} must contain only strings")
return semantic_tags
def _get_mode(series):
"""Get the mode value for a series"""
mode_values = series.mode()
if len(mode_values) > 0:
return mode_values[0]
return None
def read_csv(filepath=None,
name=None,
index=None,
time_index=None,
semantic_tags=None,
logical_types=None,
use_standard_tags=True,
**kwargs):
"""Read data from the specified CSV file and return a Woodwork DataTable
Args:
filepath (str): A valid string path to the file to read
name (str, optional): Name used to identify the datatable.
index (str, optional): Name of the index column in the dataframe.
time_index (str, optional): Name of the time index column in the dataframe.
semantic_tags (dict, optional): Dictionary mapping column names in the dataframe to the
semantic tags for the column. The keys in the dictionary should be strings
that correspond to columns in the underlying dataframe. There are two options for
specifying the dictionary values:
(str): If only one semantic tag is being set, a single string can be used as a value.
(list[str] or set[str]): If multiple tags are being set, a list or set of strings can be
used as the value.
Semantic tags will be set to an empty set for any column not included in the
dictionary.
logical_types (dict[str -> LogicalType], optional): Dictionary mapping column names in
the dataframe to the LogicalType for the column. LogicalTypes will be inferred
for any columns not present in the dictionary.
use_standard_tags (bool, optional): If True, will add standard semantic tags to columns based
on the inferred or specified logical type for the column. Defaults to True.
**kwargs: Additional keyword arguments to pass to the underlying ``pandas.read_csv`` function. For more
information on available keywords refer to the pandas documentation.
Returns:
woodwork.DataTable: DataTable created from the specified CSV file
"""
dataframe = pd.read_csv(filepath, **kwargs)
return ww.DataTable(dataframe,
name=name,
index=index,
time_index=time_index,
semantic_tags=semantic_tags,
logical_types=logical_types,
use_standard_tags=use_standard_tags)
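# Hedged usage sketch (not part of the original module); the file name, column
# names and logical types below are hypothetical:
#   dt = read_csv('orders.csv', name='orders', index='order_id',
#                 time_index='order_date',
#                 logical_types={'country': 'Categorical'},
#                 semantic_tags={'amount': 'currency'})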
def _new_dt_including(datatable, new_data):
'''
Creates a new DataTable with specified data and columns
Args:
datatable (DataTable): DataTable with desired information
new_data (DataFrame): subset of original DataTable
Returns:
DataTable: New DataTable with attributes from original DataTable but data from new DataTable
'''
cols = new_data.columns
new_logical_types = {}
new_semantic_tags = {}
new_column_descriptions = {}
new_column_metadata = {}
for col_name, col in datatable.columns.items():
if col_name not in cols:
continue
new_logical_types[col_name] = col.logical_type
new_semantic_tags[col_name] = col.semantic_tags
new_column_descriptions[col_name] = col.description
new_column_metadata[col_name] = col.metadata
new_index = datatable.index if datatable.index in cols else None
new_time_index = datatable.time_index if datatable.time_index in cols else None
if new_index is not None:
new_semantic_tags[new_index] = new_semantic_tags[new_index].difference({'index'})
if new_time_index is not None:
new_semantic_tags[new_time_index] = new_semantic_tags[new_time_index].difference({'time_index'})
return ww.DataTable(new_data,
name=datatable.name,
index=new_index,
time_index=new_time_index,
semantic_tags=new_semantic_tags,
logical_types=new_logical_types,
use_standard_tags=datatable.use_standard_tags,
table_metadata=datatable.metadata,
column_metadata=new_column_metadata,
column_descriptions=new_column_descriptions)
def import_or_raise(library, error_msg):
'''
Attempts to import the requested library. If the import fails, raises an
ImportError with the supplied error message.
Args:
library (str): the name of the library
error_msg (str): error message to return if the import fails
'''
try:
return importlib.import_module(library)
except ImportError:
raise ImportError(error_msg)
def _is_s3(string):
'''
Checks if the given string is a s3 path.
Returns a boolean.
'''
return "s3://" in string
def _is_url(string):
'''
Checks if the given string is an url path.
Returns a boolean.
'''
return 'http' in string
def _reformat_to_latlong(latlong, use_list=False):
"""Reformats LatLong columns to be tuples of floats. Uses np.nan for null values.
"""
if _is_null_latlong(latlong):
return np.nan
if isinstance(latlong, str):
try:
# Serialized latlong columns from csv or parquet will be strings, so null values will be
# read as the string 'nan' in pandas and Dask and 'NaN' in Koalas
            # neither of which is interpretable as a null value
if 'nan' in latlong:
latlong = latlong.replace('nan', 'None')
if 'NaN' in latlong:
latlong = latlong.replace('NaN', 'None')
latlong = ast.literal_eval(latlong)
except ValueError:
pass
if isinstance(latlong, (tuple, list)):
if len(latlong) != 2:
raise ValueError(f'LatLong values must have exactly two values. {latlong} does not have two values.')
latitude, longitude = map(_to_latlong_float, latlong)
# (np.nan, np.nan) should be counted as a single null value
    if pd.isnull(latitude) and pd.isnull(longitude):
        return np.nan
    return (latitude, longitude)
## 1. Introduction ##
import pandas as pd
titanic_survival = pd.read_csv("titanic_survival.csv")
# -*- coding: utf-8 -*-
""" test function application """
import pytest
from string import ascii_lowercase
from pandas import (date_range, Timestamp,
Index, MultiIndex, DataFrame, Series)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.compat import product as cart_product
import numpy as np
import pandas.util.testing as tm
import pandas as pd
from .common import MixIn
# describe
# --------------------------------
class TestDescribe(MixIn):
def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
assert_series_equal(result['mean'], grouped.mean(), check_names=False)
assert_series_equal(result['std'], grouped.std(), check_names=False)
assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
assert_series_equal(result, expected)
def test_series_index_name(self):
grouped = self.df.loc[:, ['C']].groupby(self.df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in self.tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
labels=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = self.tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = self.tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
labels=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex(self):
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
pytest.raises(ValueError, lambda: df1.groupby('k').describe())
pytest.raises(ValueError, lambda: df2.groupby('key').describe())
def test_frame_describe_unstacked_format(self):
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
class TestNUnique(MixIn):
def test_series_groupby_nunique(self):
def check_nunique(df, keys, as_index=True):
for sort, dropna in cart_product((False, True), repeat=2):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
for n, m in cart_product(10 ** np.arange(2, 6), (10, 100, 1000)):
frame = DataFrame({
'jim': np.random.choice(
list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)
})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique(self):
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
        tm.assert_frame_equal(result, expected)
"""
Copyright 2021 <NAME> - <EMAIL>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from datetime import datetime, timedelta
import ee
import numpy as np
import pandas as pd
from geedim import export, collection, root_path, info, image
from tests.util import _test_image_file, _test_search_results, _setup_test
class TestApi(unittest.TestCase):
""" Class to test backend (API) search, composite and export functionality. """
@classmethod
def setUpClass(cls):
""" Initialise Earth Engine once for all the tests here. """
_setup_test()
def _test_image(self, image_id, mask=False):
""" Test the validity of a geedim.image.MaskedImage by checking metadata. """
ee_coll_name = image.split_id(image_id)[0]
gd_coll_name = info.ee_to_gd[ee_coll_name]
gd_image = image.get_class(gd_coll_name).from_id(image_id, mask=mask)
self.assertTrue(gd_image.id == image_id, 'IDs match')
sr_band_df = pd.DataFrame.from_dict(info.collection_info[gd_coll_name]['bands'])
for key in ['bands', 'properties', 'id', 'crs', 'scale']:
self.assertTrue(key in gd_image.info.keys(), msg='Image gd_info complete')
self.assertTrue(gd_image.info[key] is not None, msg='Image gd_info complete')
self.assertTrue(gd_image.scale > 0 and gd_image.scale < 5000, 'Scale in range')
self.assertTrue(gd_image.crs != 'EPSG:4326', 'Non wgs84')
im_band_df = pd.DataFrame.from_dict(gd_image.info['bands'])
self.assertTrue(im_band_df.shape[0] >= sr_band_df.shape[0], 'Enough bands')
for id in ['VALID_MASK', 'CLOUD_MASK', 'SHADOW_MASK', 'FILL_MASK', 'SCORE']:
self.assertTrue(id in im_band_df.id.values, msg='Image has auxiliary bands')
for id in sr_band_df.id.values:
self.assertTrue(id in im_band_df.id.values, msg='Image has SR bands')
# test reflectance statistics for a specific region
region = {"type": "Polygon",
"coordinates": [[[24, -33.6], [24, -33.53], [23.93, -33.53], [23.93, -33.6], [24, -33.6]]]}
sr_band_ids = sr_band_df.id.tolist()
sr_image = gd_image.ee_image.select(sr_band_ids)
std_refl = sr_image.reduceRegion(reducer='stdDev', geometry=region, scale=2 * gd_image.scale).getInfo()
self.assertTrue(all(np.array(list(std_refl.values())) > 100), 'Std(SR) > 100')
def test_image(self):
""" Test geedim.image.MaskedImage sub-classes. """
im_param_list = [
{'image_id': 'COPERNICUS/S2_SR/20190321T075619_20190321T081839_T35HKC', 'mask': False},
{'image_id': 'LANDSAT/LC08/C02/T1_L2/LC08_172083_20190301', 'mask': True},
{'image_id': 'MODIS/006/MCD43A4/2019_01_01', 'mask': True},
]
for im_param_dict in im_param_list:
with self.subTest('Image', **im_param_dict):
self._test_image(**im_param_dict)
def test_search(self):
""" Test search on all supported image collections. """
region = {"type": "Polygon",
"coordinates": [[[24, -33.6], [24, -33.53], [23.93, -33.53], [23.93, -33.6], [24, -33.6]]]}
start_date = datetime.strptime('2019-02-01', '%Y-%m-%d')
end_date = start_date + timedelta(days=32)
valid_portion = 10
for gd_coll_name in info.gd_to_ee.keys():
with self.subTest('Search', gd_coll_name=gd_coll_name):
gd_collection = collection.Collection(gd_coll_name)
res_df = gd_collection.search(start_date, end_date, region, valid_portion=valid_portion)
_test_search_results(self, res_df, start_date, end_date, valid_portion=valid_portion)
def test_download(self):
""" Test download of images from different collections, and with different crs, and scale params. """
region = {"type": "Polygon",
"coordinates": [[[24, -33.6], [24, -33.53], [23.93, -33.53], [23.93, -33.6], [24, -33.6]]]}
im_param_list = [
{'image_id': 'COPERNICUS/S2_SR/20190321T075619_20190321T081839_T35HKC', 'mask': True, 'crs': None,
'scale': 30, 'resampling': 'bilinear'},
{'image_id': 'LANDSAT/LC08/C02/T1_L2/LC08_172083_20190301', 'mask': True, 'crs': None, 'scale': None,
'resampling': 'bicubic'},
{'image_id': 'MODIS/006/MCD43A4/2019_01_01', 'mask': True, 'crs': 'EPSG:3857', 'scale': 500,
'resampling': 'near'},
]
for impdict in im_param_list:
ee_coll_name = image.split_id(impdict['image_id'])[0]
gd_coll_name = info.ee_to_gd[ee_coll_name]
with self.subTest('Download', **impdict):
# create image.MaskedImage
gd_image = image.get_class(gd_coll_name)._from_id(impdict["image_id"], mask=impdict['mask'], region=region)
# create a filename for these parameters
name = impdict["image_id"].replace('/', '-')
crs_str = impdict["crs"].replace(':', '_') if impdict["crs"] else 'None'
filename = root_path.joinpath(f'data/outputs/tests/{name}_{crs_str}_{impdict["scale"]}m.tif')
export.download_image(gd_image, filename, region=region, crs=impdict["crs"], scale=impdict["scale"],
resampling=impdict["resampling"], overwrite=True)
impdict.pop('image_id')
_test_image_file(self, image_obj=gd_image, filename=filename, region=region, **impdict)
def test_export(self):
""" Test export of an image, without waiting for completion. """
region = {"type": "Polygon",
"coordinates": [[[24, -33.6], [24, -33.53], [23.93, -33.53], [23.93, -33.6], [24, -33.6]]]}
image_id = 'LANDSAT/LC08/C02/T1_L2/LC08_172083_20190128'
ee_image = ee.Image(image_id)
export.export_image(ee_image, image_id.replace('/', '-'), folder='geedim_test', region=region, wait=False)
def _test_composite(self, ee_image):
""" Test the metadata of a composite ee.Image for validity. """
gd_image = image.Image(ee_image)
ee_coll_name = image.split_id(gd_image.id)[0]
gd_coll_name = info.ee_to_gd[ee_coll_name]
sr_band_df = pd.DataFrame.from_dict(info.collection_info[gd_coll_name]['bands'])
for key in ['bands', 'properties', 'id']:
self.assertTrue(key in gd_image.info.keys(), msg='Image gd_info complete')
self.assertTrue(gd_image.info[key] is not None, msg='Image gd_info complete')
for key in ['crs', 'scale']:
self.assertTrue(gd_image.info[key] is None, msg='Composite in WGS84')
im_band_df = | pd.DataFrame.from_dict(gd_image.info['bands']) | pandas.DataFrame.from_dict |
"""
pygemfxns_preprocessing.py is a collection of functions used to preprocess the data into the proper format.
"""
# Built-in libraries
import os
#import glob
import argparse
# External libraries
import pandas as pd
import numpy as np
#import xarray as xr
#import netCDF4 as nc
#from time import strftime
#from datetime import datetime
#from scipy.spatial.distance import cdist
#from scipy.optimize import minimize
#import matplotlib.pyplot as plt
# Local libraries
import pygem.pygem_input as pygem_prms
import pygemfxns_modelsetup as modelsetup
#%% TO-DO LIST:
# - clean up create lapse rate input data (put it all in pygem_prms.py)
#%%
def getparser():
"""
Use argparse to add arguments from the command line
Parameters
----------
    option_wgms : int
        Switch for processing wgms data (default = 1 (yes))
Returns
-------
Object containing arguments and their respective values.
"""
parser = argparse.ArgumentParser(description="select pre-processing options")
# add arguments
parser.add_argument('-option_wgms', action='store', type=int, default=1,
help='option to pre-process wgms data (1=yes, 0=no)')
return parser
parser = getparser()
args = parser.parse_args()
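# Hypothetical usage sketch (not part of the original script): the parser above is
# normally driven from the command line, e.g.
#   python pygemfxns_preprocessing.py -option_wgms 1
# which sets args.option_wgms = 1 and enables the WGMS pre-processing block below.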
#%%
#rgi_regionsO1 = [13,14,15]
#main_glac_rgi_all = pd.DataFrame()
#for region in rgi_regionsO1:
# main_glac_rgi_region = modelsetup.selectglaciersrgitable(rgi_regionsO1=[region], rgi_regionsO2='all',
# rgi_glac_number='all')
# main_glac_rgi_all = main_glac_rgi_all.append(main_glac_rgi_region)
#%%
if args.option_wgms == 1:
wgms_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/WGMS/DOI-WGMS-FoG-2019-12/'
wgms_ee_fn = 'WGMS-FoG-2019-12-EE-MASS-BALANCE.csv'
wgms_e_fn = 'WGMS-FoG-2019-12-E-MASS-BALANCE-OVERVIEW.csv'
wgms_id_fn = 'WGMS-FoG-2019-12-AA-GLACIER-ID-LUT.csv'
wgms_e_df = pd.read_csv(wgms_fp + wgms_e_fn, encoding='unicode_escape')
wgms_ee_df = pd.read_csv(wgms_fp + wgms_ee_fn, encoding='unicode_escape')
wgms_id_df = | pd.read_csv(wgms_fp + wgms_id_fn, encoding='unicode_escape') | pandas.read_csv |
import re
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestXS:
def test_xs(self, float_frame, datetime_frame):
idx = float_frame.index[5]
xs = float_frame.xs(idx)
for item, value in xs.items():
if np.isnan(value):
assert np.isnan(float_frame[item][idx])
else:
assert value == float_frame[item][idx]
# mixed-type xs
test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
frame = DataFrame(test_data)
xs = frame.xs("1")
assert xs.dtype == np.object_
assert xs["A"] == 1
assert xs["B"] == "1"
with pytest.raises(
KeyError, match=re.escape("Timestamp('1999-12-31 00:00:00', freq='B')")
):
datetime_frame.xs(datetime_frame.index[0] - BDay())
# xs get column
series = float_frame.xs("A", axis=1)
expected = float_frame["A"]
tm.assert_series_equal(series, expected)
# view is returned if possible
series = float_frame.xs("A", axis=1)
series[:] = 5
assert (expected == 5).all()
def test_xs_corner(self):
# pathological mixed-type reordering case
df = DataFrame(index=[0])
df["A"] = 1.0
df["B"] = "foo"
df["C"] = 2.0
df["D"] = "bar"
df["E"] = 3.0
xs = df.xs(0)
exp = pd.Series([1.0, "foo", 2.0, "bar", 3.0], index=list("ABCDE"), name=0)
tm.assert_series_equal(xs, exp)
# no columns but Index(dtype=object)
df = DataFrame(index=["a", "b", "c"])
result = df.xs("a")
expected = Series([], name="a", index= | pd.Index([]) | pandas.Index |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
# In[2]:
train_encoded = pd.read_csv("../data/train_store_encoded_onehot.csv")
# In[3]:
train_df = pd.read_csv("../data/train.csv")
store_df = pd.read_csv("../data/store.csv")
# In[4]:
cate_df = store_df.apply(lambda x: (x["Store"], x["StoreType"] + x["Assortment"]), axis = 1).map(lambda x: x[-1]).copy().reset_index()
cate_df.columns = ["Store", "cate"]
cate_df["Store"] = cate_df["Store"] + 1
# In[5]:
def calculate_days_num(data_df, cate_df):
import gc
data_df["Date"] = pd.to_datetime(data_df["Date"])
merge_df = pd.merge(data_df[["Date", "Store", "Sales"]], cate_df, on = "Store", how = "inner")
print("merge_df shape : {}".format(merge_df.shape))
from functools import reduce
    # intersection of the dates available for every category, in chronological order
    dates_per_cate = merge_df.groupby("cate").apply(dict).map(lambda inner_dict: set(inner_dict["Date"].tolist())).values.tolist()
    common_dates = reduce(lambda a, b: a.intersection(b), dates_per_cate)
    ordered_intersection_dates = sorted(pd.to_datetime(sorted(common_dates)))
ordered_intersection_dates = pd.Series(ordered_intersection_dates)
#return ordered_intersection_dates
sales_date_intersection = merge_df.copy()
del merge_df
gc.collect()
sales_date_intersection = sales_date_intersection[sales_date_intersection["Date"].isin(ordered_intersection_dates)].copy()
def transform_dict_to_df(row):
Store, dict_ = row["cate"], row[0]
Date = dict_["Date"].tolist()
Sales = dict_["Sales"].tolist()
df = pd.DataFrame(list(zip(*[Date, Sales])))
df.columns = ["Date", Store]
return df
before_reduce_list = sales_date_intersection.groupby("cate").apply(dict).reset_index().apply(
transform_dict_to_df
, axis = 1).values.tolist()
#return before_reduce_list
before_reduce_list = list(map(lambda x: x.groupby("Date").sum().reset_index(), before_reduce_list))
sales_cate_format_df = reduce(lambda a, b: pd.merge(a, b, on = "Date", how = "inner"), before_reduce_list)
return sales_cate_format_df
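# Illustrative sketch of the returned frame (column names assumed from StoreType+Assortment codes):
# calculate_days_num yields one row per Date common to every category and one column per
# category, each cell holding the Sales summed over all stores of that category, e.g.
#   Date        aa              ac              da   ...
#   2013-01-01  <summed Sales>  <summed Sales>  ...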
# In[6]:
sales_cate_format_df = calculate_days_num(train_df, cate_df[cate_df["cate"].isin(cate_df["cate"].value_counts()[cate_df["cate"].value_counts() > 70].index.tolist())])
# In[7]:
sales_cate_format_df["total"] = sales_cate_format_df.iloc[:, 1:].apply(lambda x: x.sum(), axis = 1)
# In[8]:
from functools import reduce
sales_cate_format_df_up = sales_cate_format_df[sales_cate_format_df.iloc[:, 1:].apply(lambda x: reduce(lambda a, b: a * b ,map(int,map(bool, x))), axis = 1) > 0]
# In[9]:
df = sales_cate_format_df_up.copy()
df.index = | pd.to_datetime(df["Date"]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 08:00:00 2020
@author: <NAME>
contact : <EMAIL>
These are the functions used to process the data for the ODIASP project.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import scipy
from scipy.ndimage import zoom, center_of_mass
import matplotlib.pyplot as plt
import numpy as np
import cupy as cp
from cupyx.scipy import ndimage as MAGIC
import pydicom
from pydicom.dataset import Dataset, FileDataset
from pydicom.uid import ExplicitVRLittleEndian
import pydicom._storage_sopclass_uids
import os
import pandas
from PIL import Image
import random
from shutil import copy2, move
from copy import copy
import skimage.io as io
import skimage.transform as trans
from alive_progress import alive_bar
import datetime
import scipy
import openpyxl
import time
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.losses import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
import tensorflow.keras.backend as K
from Settings import COMPUTE_CAPACITY
def DirVerification (name,DossierProjet=None,verbose = 1):
if os.path.exists(name):
if verbose ==1 :
print("Le dossier " + str(name) + " existe déja : ", os.path.abspath(name))
return name
else :
if DossierProjet!=None:
dir_path = os.path.join(DossierProjet,name)
else : dir_path = name
if os.path.exists(dir_path):
if verbose ==1 :
print("Le dossier " + str(name) + " existe déja : ")
print(name, " : ", dir_path)
else :
os.mkdir(dir_path)
if verbose == 1 :
print("Création du dossier " + str(name)+ " : ")
print(name, " : ", dir_path)
return dir_path
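# Minimal usage sketch (paths are placeholders, not from the original project):
# out_dir = DirVerification("L3", DossierProjet=r"C:\ODIASP", verbose=1)
# creates <DossierProjet>\L3 when it is missing and returns its path,
# or returns `name` unchanged when that path already exists.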
#___________________________________________________________________________________________
#___________________FONCTIONS POUR LA CREATION DES DATASETS ________________________________
#___________________________________________________________________________________________
def readCSV(csv_path,name=None,indexing=None):
"""
Fonction simple pour lire le CSV et le garder en mémoire sous la forme d'un datafile, plus facilement lisible en utilisant pandas
si on rentre name (un des fichiers numpy disponibles), la fonction affiche cette valeur
On peut rentrer un string pour l'arg indexing pour demander a classer selon la colonne.
"""
df=pandas.read_csv(csv_path, delimiter=",",dtype=str)
if indexing != None :
df.set_index(indexing, inplace=True)
if name:
print(df.loc[name])
return df
#___________________________________________________________________________________________
#___________________FONCTIONS POUR IMPORTER LES FICHIERS DICOM______________________________
#___________________________________________________________________________________________
def fast_scandir(dir):
"""
    Takes a folder containing any number of sub-folders and sub-sub-folders and builds the list of all sub-folders.
    Useful to generalize a single-folder function to as many folders as needed in one pass.
    Note: the root folder dir itself is not included in the list.
    Parameters
    ----------
    - dir : string, path to the root folder
    Returns
    -------
    - subfolders : list containing all the sub-folders
"""
subfolders= [f.path for f in os.scandir(dir) if f.is_dir()]
for dir in list(subfolders):
subfolders.extend(fast_scandir(dir))
return subfolders
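# Usage sketch (hypothetical root folder): list every sub-folder of a DICOM export so that
# each series directory can be fed to import_dicom_to_abdopelv / All_in_One.
# all_series_dirs = fast_scandir(r"C:\ODIASP\DicomExports")
# for series_dir in all_series_dirs:
#     print(series_dir)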
def TESTINGPRED(prediction,classes,mode="ProportionGlobale", nombredecoupes=1, numerocoupeinitial = 0, verbose=1):
longueur = len(prediction)
proportion =0
if numerocoupeinitial>longueur: #utilisé à des fins de DEBUGGING uniquement : a priori non possible.
print("error")
numerocoupeinitial=longueur
if (nombredecoupes+numerocoupeinitial)>longueur:
nombredecoupes = longueur - numerocoupeinitial
if mode=="ProportionGlobale":
#test de l'ensemble du volume
nombredecoupes =longueur
for i in range(0,longueur):
proportion += prediction[i]
if verbose >0 :
for item in range(0,len(classes)):
print(mode,"sur",nombredecoupes,"coupes de",classes[item], '{:.3f}'.format(proportion[item]/nombredecoupes))
if mode=="ProportionFinale":
#test des "nombredecoupes" dernières coupes
if nombredecoupes>longueur : #au cas où l'on ait choisi trop de coupes
nombredecoupes=longueur
for i in range(numerocoupeinitial,nombredecoupes+numerocoupeinitial):
proportion += prediction[-i]
if verbose >0 :
for item in range(0,len(classes)):
print(mode,"sur",nombredecoupes,"coupes de",classes[item], '{:.3f}'.format(proportion[item]/nombredecoupes))
if mode=="ProportionInitiale":
#test des "nombredecoupes" dernières coupes
if nombredecoupes>longueur : #au cas où l'on ait choisi trop de coupes
nombredecoupes=longueur
for i in range(numerocoupeinitial,nombredecoupes+numerocoupeinitial):
proportion += prediction[i]
if verbose >0 :
for item in range(0,len(classes)):
print(mode,"sur",nombredecoupes,"coupes de",classes[item], '{:.3f}'.format(proportion[item]/nombredecoupes), "à partir de la coupe", numerocoupeinitial)
return proportion/nombredecoupes
def import_dicom_to_abdopelv(rootdir,
metadata,
csv_path,
save=False,
model = None,
verbose = 2,
Compute_capacity = COMPUTE_CAPACITY,
CUPY = True
):
"""
Cette fonction charge un dossier contenant des fichiers DICOM. Elle peut s'arréter automatiquement si les métadonnées ou le nombre d'image ne correspondent pas.
Sinon elle charge toutes les coupes pour les analyser et les labeliser. Si le scanner contient une partie abdomonopelvienne il est mis en mémoire (voire sauvegarder si "save" est rempli) puis les infos sont enregistrées dans le csv
Parameters
----------
- rootdir : string, chemin complet vers un dossier contenant des fichiers DICOM
- metadata : correspond à la liste des Metadata que l'on veut récupérer dans les DICOM
- csv_path : chemin (complet) vers le csv pour enregistrer les metadatas du scanner. Le CSV sera créé automatiquement si celui-ci n'existe pas encore
- save=False, False ou string, indique le chemin pour sauvegarder les volumes .npy correspondant aux scanners chargés. Optionnel (False : garde les numpy en mémoire.) Remarque : les numpy sont lourds, plus que les DICOM (qui sont compressés), environ 700Mo pour un scan abdo, on peut vite remplir un disque dur en utilisant cete fonction sur un dossier comprenant beaucoup de scanners !
- model : le model de labelisation
- Compute_capacity = COMPUTE_CAPACITY : int, la capacité de calcul des gpu, voir site nvidia. Pour adapter automatiquement le calcul réalisé par le reseau en fonction de la capacité de l'ordinateur
- verbose : qt de verbose. 0 pas de verbose ; 1 : texte seulement ; 2 : texte et images (met le terminal en pause à chaque image)
Returns
-------
Si le scanner a été chargé :
- volume_numpy : le volume chargé.
- perduSUP, perduBAS : les coupes non chargées sur la partie supérieure et la partie basse du scanner
- facteur : facteur d'agrandissement
- NOMFICHIER : le nom du fichier qui a été utilisé pour remplir le .csv
Si la fonction s'est arrêtée spontanément :
- volume_numpy = None
- perduSUP = "Arret"
- perduBAS = 0
- facteur = "niveau1" ou "niveau2"
- None
Notes
-----
"""
classtotest = ['Abdomen','Cervical','Diaphragme','Membreinf','Pelvien','Thorax']
#Verification que le dossier de sortie existe
if save != False :
save = DirVerification (save, verbose = 0)
#Ecriture du csv
if os.path.isfile(csv_path)==False:
titres=""
titres += "Name"
for ele in metadata:
titres += ","+ str(ele)
titres += ",OriginalSlices,DeletedSlices,Facteur,Path,L3Position,Certitude,L3Original"
if verbose>0:print("Creation du csv")
with open(csv_path,"wt", encoding="utf-8") as file :
file.write(titres)
file.close()
#Création des variables dont nous allons avoir besoin :
stopNow = False
perduSUP = 0
perduBAS = 0
facteur = 1
erreur = " "
volume_numpy = np.empty(0)
inter = {}
start = time.perf_counter()
list_files = os.listdir(rootdir)
if len(list_files) <150:
erreur = " Pas assez de coupes ({} fichiers)".format(len(list_files))
perduSUP = "Arret"
facteur = "niveau1"
if verbose>0:print(" Arrêt précoce de niveau 1. Les images n'ont pas été chargées : ",erreur)
return volume_numpy, perduSUP, perduBAS, facteur, None #le volume numpy est donc vide si le dossier n'avait pas les informations requises.
else :
test1 = 1
test2 = 100
echantillon1 = os.path.join(rootdir, list_files[test1])
echantillon2 = os.path.join(rootdir, list_files[test2])
while os.path.isdir(echantillon1):
test1 +=1
echantillon1 = os.path.join(rootdir, list_files[test1])
        while os.path.isdir(echantillon2):
test2 +=1
echantillon2 = os.path.join(rootdir, list_files[test2])
if not os.path.isdir(echantillon1):
_ds_1 = pydicom.dcmread(echantillon1,force =True)
"""
Donnons un nom à cette série
"""
if (0x20, 0x000E) in _ds_1:
NameSerie = str(_ds_1["SeriesInstanceUID"].value)
NameSerie = NameSerie.replace('.','')
NameSerie = NameSerie[-30:]
else :
NameSerie = "000000000000000000000000000000"
NOMFICHIER = str(os.path.basename(rootdir))+"_"+NameSerie+r".npy"
"""
Verifions que cette serie n'a pas deja ete analysee en quel cas on s'arrête.
"""
if os.path.isfile(csv_path)==True:
df = readCSV(csv_path,name=None,indexing="Name")
if df.index.str.contains(NameSerie).any() :
erreur = " Scanner déjà analysé."
if verbose>0:print(" Arrêt précoce de niveau 1. Les images n'ont pas été chargées : ",erreur)
perduSUP = "Arret"
facteur = "niveau1"
return volume_numpy, perduSUP, perduBAS, facteur, None
"""
Nous essayons de déterminer s'il s'agit d'un scanner AP ou TAP à partir uniquement des métadonnées du dicom
Cela permet si ce n'est pas le cas de réaliser une fonction rapide, qui ne charge quasiment aucune image.
"""
if (0x08, 0x60) in _ds_1:
modalite = _ds_1["Modality"].value
if str(modalite) != "CT": #Limitation si ce n'est pas un scanner !
erreur += " Le fichier DICOM n'est pas un scanner."
stopNow = True
if (0x18, 0x50) in _ds_1:
thickness = _ds_1["SliceThickness"].value
if thickness >2.5: #Limitation si coupe trop épaisses : MIP...etc
erreur += " Epaisseur de coupe trop importante."
stopNow = True
if (0x28, 0x1050) in _ds_1:
WindowCenter = _ds_1["WindowCenter"].value
try :
if WindowCenter <0:
erreur += " Scanner Pulmonaire." #Limitation si fenetre pulmonaire
stopNow = True
except :
erreur += " Erreur inconnue sur le WindowCenter :" + str(WindowCenter) + "."
stopNow = True
if WindowCenter ==500:
erreur += " Fenetrage Os." #Limitation si fenetre os (car trop de grain)
stopNow = True
if (0x18, 0x15) in _ds_1:
BodyPartExamined = _ds_1["BodyPartExamined"].value
if "HEAD" in str(BodyPartExamined) : #Limitation si imagerie cerebrale
erreur += " BodyPartExamined : "+str(BodyPartExamined)
stopNow = True
#Verification de l'âge
if (0x10, 0x10) in _ds_1:
Age = _ds_1["PatientAge"].value
if Age[-1:] != "Y" :
erreur += " Patient mineur : "+str(Age)
stopNow = True
else :
try :
Age = Age[:-1]
if float(Age) < 18. : #Limitation si patient mineur
erreur += " Patient mineur : "+str(Age)
stopNow = True
except TypeError:
print("Erreur dans la lecture de l'âge chez ce patient :", Age, type(Age))
pass
if (0x18, 0x1160) in _ds_1:
BodyPartExamined = _ds_1["FilterType"].value
if "HEAD" in str(BodyPartExamined) : #Limitation si imagerie cerebrale (autre moyen de verification)
erreur += " FilterType : " + str(BodyPartExamined)
stopNow = True
if (0x8, 0x103E) in _ds_1:
BodyPartExamined = _ds_1["SeriesDescription"].value
if "Crane" in str(BodyPartExamined) : #Limitation si imagerie cerebrale (autre moyen de verification)
erreur += " Scanner Cranien."
stopNow = True
if not os.path.isdir(echantillon2):
_ds_2 = pydicom.dcmread(echantillon2,force =True,specific_tags =["ImagePositionPatient","SliceThickness"])
position1 = [5.,10.,15.]
position2 = [5.,10.,15.] #Lecture de la position pour ne pas prendre de MPR coro ou sag
if (0x20, 0x32) in _ds_1:
position1 = _ds_1["ImagePositionPatient"].value
if (0x20, 0x32) in _ds_2:
position2 = _ds_2["ImagePositionPatient"].value
if position1[0] != position2[0]:
erreur += " Reconstruction Sagittale."
stopNow = True
if position1[1] != position2[1]:
erreur += " Reconstruction Coronale."
stopNow = True
if stopNow == True:
"""
Si le scanner n'est ni AP ni TAP la fonction s'arrête dès maintenant.
"""
if verbose>0:print(" Arrêt précoce de niveau 1. Les images n'ont pas été chargées : ",erreur)
perduSUP = "Arret"
facteur = "niveau1"
if verbose>1:print("Mise à jour du fichier csv :", csv_path)
#Essai remplir csv meme pour les arrets
values=[]
for de2 in metadata:
if de2 in _ds_1:
if _ds_1[de2].VR == "SQ":
values = values + "sequence"
elif _ds_1[de2].name != "Pixel Data":
_ds = str(_ds_1[de2].value)[:64]
raw_ds = _ds.replace('\n','__')
raw_ds = raw_ds.replace('\r','__')
raw_ds = raw_ds.replace('\t',"__")
raw_ds = raw_ds.replace(',',"__")
values.append(raw_ds)
end = time.perf_counter()
Timing = end-start
dictMETADATAS = dict(zip(metadata, values))
dictODIASP = {'Name' : NOMFICHIER, "Duree" : Timing, "Erreur" : erreur,'OriginalSlices' : len(list_files), 'Path' : rootdir, "Archive" : None}
dict3 = {**dictMETADATAS , **dictODIASP}
df=pandas.read_csv(csv_path, delimiter=",")
modDfObj = df.append(dict3, ignore_index=True)
modDfObj.to_csv(csv_path, index=False)
return volume_numpy, perduSUP, perduBAS, facteur, None #le volume numpy est donc vide si le dossier n'avait pas les informations requises.
if stopNow == False:
"""
Maintenant que l'on a arrêté la fonction précocement selon certains criteres, regardons la liste des images
"""
for f in list_files:
if not os.path.isdir(f):
f_long = os.path.join(rootdir, f)
_ds_ = pydicom.dcmread(f_long,specific_tags =["ImagePositionPatient","SliceThickness"])
inter[f_long]=_ds_.ImagePositionPatient[2]
        inter_sorted=sorted(inter.items(), key=lambda x: x[1], reverse=True) #sort in scan-sequence order (which is not the alphabetical order of the file names)
liste_fichiers=[x[0] for x in inter_sorted]
path_img1=liste_fichiers[0]
ds_img1=pydicom.dcmread(path_img1,stop_before_pixels=True)
x_dim=int(ds_img1[0x28,0x10].value)
y_dim=int(ds_img1[0x28,0x11].value)
nbcoupes = len(liste_fichiers)
if verbose>0:print(len(liste_fichiers), " fichiers trouvés pour ce scanner")
if verbose>0:print("Creation d'un volume echantillon pour labelisation")
x_dimDIV=x_dim/4
y_dimDIV=y_dim/4
ratioECHANTILLONAGE = 5 #Nous allons tester le volume à cet intervalle de coupe
hauteur = len(liste_fichiers)//ratioECHANTILLONAGE
volume_pour_label=np.zeros((hauteur,int(x_dimDIV),int(y_dimDIV),3))
for k in range (0,hauteur):
dicom_file = pydicom.read_file(liste_fichiers[ratioECHANTILLONAGE*k])
img_orig_dcm = (dicom_file.pixel_array)
slope=float(dicom_file[0x28,0x1053].value)
intercept=float(dicom_file[0x28,0x1052].value)
img_modif_dcm=(img_orig_dcm*slope) + intercept
if (0x28, 0x1050) in dicom_file:
WindowCenter = dicom_file["WindowCenter"].value
if not isinstance(WindowCenter, float) :
WindowCenter = 40
if (0x28, 0x1051) in dicom_file:
WindowWidth = dicom_file["WindowWidth"].value
arraytopng = zoom(img_modif_dcm, (1/4, 1/4))
arraytopng = np.stack((arraytopng,)*3, axis=-1)
volume_pour_label[k,:,:,:]=arraytopng
del arraytopng
volume_pour_label = np.asarray(volume_pour_label, dtype=np.float16)
volume_pour_label,a,b = normalize(volume_pour_label)
volume_pour_label = WL_scaled(WindowCenter,WindowWidth,volume_pour_label,a,b)
if verbose>1:affichage3D(volume_pour_label, 64, axis=2)
if verbose >0 : print("Analyse du volume pour obtention d'un scanner abdominopelvien")
        if verbose >0 : AA=1 #clamp verbose to 1, since model.predict does not accept verbose=2
else : AA=0
AUTO_BATCH = int(Compute_capacity*5.3)
prediction = model.predict(volume_pour_label, verbose =AA, batch_size=AUTO_BATCH)
prediction0_1 = np.zeros_like(prediction, dtype=None, order='K', subok=True, shape=None)
for i in range (0,np.shape(prediction)[0]):
prediction0_1[i][np.argmax(prediction[i])] = 1
moyenne = TESTINGPRED(prediction0_1,classtotest,"ProportionGlobale",verbose=0)
if moyenne[4] <0.2:
stopNow = True
erreur = "Le scanner ne possède pas assez de coupes pelviennes"
fin = TESTINGPRED(prediction0_1,classtotest,
"ProportionFinale",
nombredecoupes=50//ratioECHANTILLONAGE,
numerocoupeinitial = 0,verbose=0)
if (fin[1]+fin[2]+fin[0]+fin[5]) >0.35 : #plus de 35 % finaux sont cervical, diaphragme, abdo ou thorax
stopNow = True
erreur = "La fin du volume n'est pas un scanner abdominopelvien"
if stopNow == True:
volume_numpy = np.empty(0)
if verbose>0:print(" Arrêt précoce de niveau 2. Les images ont été chargées partiellement puis arrêtées : ",erreur)
perduSUP = "Arret"
facteur = "niveau2"
if verbose>1:print("Mise à jour du fichier csv :", csv_path)
#Essai remplir csv meme pour les arrets
values=[]
for de2 in metadata:
if de2 in ds_img1:
if ds_img1[de2].VR == "SQ":
values = values + "sequence"
elif ds_img1[de2].name != "Pixel Data":
_ds = str(ds_img1[de2].value)[:64]
raw_ds = _ds.replace('\n','__')
raw_ds = raw_ds.replace('\r','__')
raw_ds = raw_ds.replace('\t',"__")
raw_ds = raw_ds.replace(',',"__")
values.append(raw_ds)
end = time.perf_counter()
Timing = end-start
dictMETADATAS = dict(zip(metadata, values))
dictODIASP = {'Name' : NOMFICHIER, "Duree" : Timing, "Erreur" : erreur,'OriginalSlices' : len(list_files), 'Path' : rootdir, "Archive" : None}
dict3 = {**dictMETADATAS , **dictODIASP}
df=pandas.read_csv(csv_path, delimiter=",")
modDfObj = df.append(dict3, ignore_index=True)
modDfObj.to_csv(csv_path, index=False)
return volume_numpy, perduSUP, perduBAS, facteur, None
if stopNow == False:
if verbose==0: print("Chargement, cela peut être long ...")
"""
Nous allons maintenant retirer les coupes initiales ou coupes finales si jamais elles n'appartiennent pas au volume abdopelv
"""
total = len(prediction)
tranchedelecture = 30 #Lecture des 30 premieres coupes :
for i in range(0,total,tranchedelecture//ratioECHANTILLONAGE):
debut = TESTINGPRED(prediction0_1,classtotest,
"ProportionInitiale",
nombredecoupes=tranchedelecture//ratioECHANTILLONAGE,
numerocoupeinitial=i,verbose=0)
if debut[5]+debut[1] > (debut[0]+debut[2]+debut[4]) : # plus de thorax et cervical que abdopelv
if verbose>1:print(" Sur les ", tranchedelecture, " premières coupes : proportion de crane,cervical :", debut[1], "thorax :", debut[5]," diaphragme :", debut[2]," abdo :", debut[0]," pelv :", debut[4]," mbinf :", debut[3])
liste_fichiers= liste_fichiers[tranchedelecture:]
perduSUP += tranchedelecture
if verbose>0:print("Supression de ",tranchedelecture," coupes dont la majorité est du crane, cervical ou thorax.")
if verbose>0 and perduSUP==0 :print("... Pas de coupes crane ni cervical ni thorax majoritaires initialement.")
total = len(prediction) #mise à jour suite à la découpe faite juste au dessus
tranchedelecture = 30
for i in range(0,total,tranchedelecture//ratioECHANTILLONAGE):
fin = TESTINGPRED(prediction0_1,classtotest,
"ProportionFinale",
nombredecoupes=tranchedelecture//ratioECHANTILLONAGE,
numerocoupeinitial=i,verbose=0)
if fin[3] > (fin[4]+fin[0]) : # plus de mb inf que pelvien ou abdo
if verbose>1:print(" Sur les ", tranchedelecture, " dernières coupes : proportion de crane,cervical :", debut[1], "thorax :", debut[5]," diaphragme :", debut[2]," abdo :", debut[0]," pelv :", debut[4]," mbinf :", debut[3])
#if verbose>1:print("Proportion de abdominopelvien:", debut[0])
liste_fichiers= liste_fichiers[:-tranchedelecture]
perduBAS += tranchedelecture
if verbose>0:print("Supression de ",tranchedelecture," coupes finales dont la majorité est du membre inférieur.")
if verbose>0 and perduBAS==0 :print("... Pas de coupes membres inférieurs majoritaires à la fin.")
del volume_pour_label
#Creation du volume representant le scanner dont on garde les coupes
volume_numpy=np.zeros((len(liste_fichiers),x_dim,y_dim))
slope=float(ds_img1[0x28,0x1053].value)
intercept=float(ds_img1[0x28,0x1052].value)
for k in range (0,len(liste_fichiers)):
dicom_file = pydicom.read_file(liste_fichiers[k])
img_orig_dcm = (dicom_file.pixel_array)
img_modif_dcm=(img_orig_dcm*slope) + intercept
img_modif_dcm= np.asarray(img_modif_dcm, dtype=np.float16)
volume_numpy[k,:,:]=img_modif_dcm #ecrit une ligne correspondant à l'image
volume_numpy = np.asarray(volume_numpy, dtype=np.float16)
        if len(liste_fichiers)>384 : #this block compensates for unequal slice thicknesses by capping the volume at 384 slices
facteur = 384/(len(liste_fichiers))
nbcoupesfinal = int(len(liste_fichiers)*facteur)
if facteur !=1 :
if verbose>0:print(len(liste_fichiers)," coupes ont été chargées puis le volume est ramené à ", nbcoupesfinal, " coupes")
#volume_numpy = zoom(volume_numpy, (facteur, 1, 1))
#CUPY
if CUPY == True:
cp.cuda.Device(0).use()
x_gpu_0 = cp.asarray(volume_numpy)
x_gpu_0 = MAGIC.zoom(x_gpu_0, (facteur, 1, 1))
volume_numpy = cp.asnumpy(x_gpu_0)
x_gpu_0 = None
else :
volume_numpy = zoom(volume_numpy, (facteur, 1, 1))
else :
if verbose>0: print(len(liste_fichiers), " coupes ont étés chargées")
#Sauvegarde .npy
if save != False:
if verbose>0: print("Sauvegarde de "+NOMFICHIER+" ("+str(nbcoupesfinal)+" coupes) dans le dossier "+save)
np.save(os.path.join(save,NOMFICHIER),volume_numpy)
#Affichage
if verbose>1:
print("...dont voici l'image sagittale centrale")
volume_numpy = np.asarray(volume_numpy, dtype=np.float16)
affichage3D(volume_numpy, int(x_dim//2), axis=2)
#Mise a jour du csv
if verbose>1:print("Mise à jour du fichier csv :", csv_path)
values=[]
for de2 in metadata:
if de2 in ds_img1:
if ds_img1[de2].VR == "SQ":
values = values + "sequence"
elif ds_img1[de2].name != "Pixel Data":
_ds = str(ds_img1[de2].value)[:64]
raw_ds = _ds.replace('\n','__')
raw_ds = raw_ds.replace('\r','__')
raw_ds = raw_ds.replace('\t',"__")
raw_ds = raw_ds.replace(',',"__")
values.append(raw_ds)
end = time.perf_counter()
Timing = end-start
dictMETADATAS = dict(zip(metadata, values))
dictODIASP = {'Name' : NOMFICHIER, "Duree" : Timing,'OriginalSlices' : nbcoupes, 'DeletedSlices' : str(perduSUP)+r"+"+str(perduBAS),'Facteur' : facteur , 'Path' : rootdir, "Archive" : None}
dict3 = {**dictMETADATAS , **dictODIASP}
df=pandas.read_csv(csv_path, delimiter=",")
modDfObj = df.append(dict3, ignore_index=True)
modDfObj.to_csv(csv_path, index=False)
return volume_numpy, perduSUP, perduBAS, facteur, NOMFICHIER
#___________________________________________________________________________________________
#___________________FONCTIONS POUR AFFICHAGE DES IMAGES_____________________________________
#___________________________________________________________________________________________
def ApplyWindowLevel (Global_Level,Global_Window,image):
"""
    Used in FindL3.
    DICOM voxel values range from roughly -2000 to +4000; to display an image in grey scale (255 grey levels on a standard screen) those ~6000 possible values must be mapped to 255. This function is required before displaying an image but loses information (16 bits to 8 bits per pixel).
    Mandatory before saving a png or jpg image, but lossy!
    Pixel values are remapped according to a window width and a window center.
    Parameters
    ----------
    - Global_Level : window center (in HU)
    - Global_Window : window width (in HU)
    - image : numpy image or volume loaded in memory
    Returns
    -------
    - image_ret : the image or volume after the contrast adjustment.
    Notes
    -----
    Does NOT work if the image has already been normalized; in that case use WL_scaled, providing a and b (obtained from the normalize function).
"""
li=Global_Level-(Global_Window/2)
ls=Global_Level+(Global_Window/2)
image_ret=np.clip(image, li, ls)
image_ret=image_ret-li
image_ret=image_ret/(ls-li)
image_ret=image_ret*255
return image_ret
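# Usage sketch with the soft-tissue window used elsewhere in this file (center 40 HU, width 400 HU):
# img_8bit = ApplyWindowLevel(40, 400, img_modif_dcm)          # img_modif_dcm in raw Hounsfield units
# Image.fromarray(img_8bit.astype(np.uint8)).save("axial.png")
# Only valid on non-normalized data; for normalized volumes use WL_scaled below.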
def Norm0_1 (volume_array):
"""
les scanners ont des voxels dont la valeur est négative, ce qui sera mal interprété pour une image, il faut donc normaliser entre 0 et 1. Cela permet notamment de les afficher sous formlat image apres un facteur de *255.
"""
a,b,c=volume_array.min(),volume_array.max(),volume_array.mean()
volume_array_scale=(volume_array-a)/(b-a)
return volume_array_scale,a,b,c
def WL_scaled (Global_Level,Global_Window,array,a,b):
"""
Idem que ApplyWindowLevel mais corrigé par les facteurs a et b qui correpsondent au min et max,
>>> à utiliser à la place de ApplyWindowLevel si on a utilisé Norm0_1 ou normalize
Utilisée dans FindL3
Les valeurs des voxels en DICOM sont entre -2000 et +4000, pour afficher une image en echelle de gris (255 possibilités de gris sur un oridnateur classique) il faut réduire les 6000 possibilités à 255. Cette fonction est nécessaire avant d'afficher une image mais fait perdre des données (passage de 16 bits à 8 bits par pixel).
Obligatoire pour sauvegarder une image png ou jpg mais fait perdre de l'information !
On redéfinit les valeurs des pixels selon une largeur de fenetre et un centre
On sauvegarde les bornes initiales dans les variables a et b dans le cas où l'on veuille modifier le contraste après coup
Parameters
----------
- Global_Level : centre de la fenetre (en UH)
- Global_Window : largeur de la fenetre (en UH)
- array : image ou volume numpy chargé en mémoire
- a : minimum en UH avant normalize
- b : maximum en UH avant normalize
Returns
-------
- image_ret : l'image ou le volume après réglage du contraste.
Notes
-----
Ne fonctionne QUE si l'image a déjà été normalisée.
"""
li=Global_Level-(Global_Window/2)
ls=Global_Level+(Global_Window/2)
li=li/b
ls=ls/b
image_ret=np.clip(array, li, ls)
image_ret=image_ret-li
image_ret=image_ret/(ls-li)
return image_ret
#___________________________________________________________________________________________
#___________________FONCTIONS POUR VALIDER LA SEGMENTATION__________________________________
#___________________________________________________________________________________________
def normalize (volume_array):
"""
    Used in FindL3.
    DICOM voxel values range from roughly -2000 to +4000; to keep the neural-network computations well behaved it is advisable to rescale them to [-1, 1].
    The original bounds are kept in the variables a and b in case the contrast needs to be adjusted afterwards.
    Parameters
    ----------
    - volume_array : numpy volume loaded in memory
    Returns
    -------
    - volume_array : rescaled numpy volume
    - a : minimum value of the volume before rescaling
    - b : maximum value of the volume before rescaling
    Notes
    -----
    a and b can be passed as-is to the contrast function WL_scaled
"""
    a,b=float(volume_array.min()),float(volume_array.max()) #np.float was removed in recent NumPy versions
    volume_array = volume_array.astype(float)
if abs(a)>abs(b) :
c = abs(a)
else:
c= abs(b)
    if c != 0:
        volume_array_scale=volume_array/c
    else:
        volume_array_scale=volume_array #avoid an unbound variable when the volume is all zeros
    return volume_array_scale,a,b
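# Sketch of how normalize and WL_scaled are meant to be chained (same pattern as in
# import_dicom_to_abdopelv; the variable names here are illustrative):
# volume_scaled, a, b = normalize(volume_hu)                 # volume_hu in Hounsfield units
# volume_display = WL_scaled(40, 400, volume_scaled, a, b)   # contrast applied after normalization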
def axial_to_sag (volume_array, sens=1):
"""
Utilisée dans FindL3
rotation pour passer le volume de axial à sagittal
Parameters
----------
- volume_array : volume numpy chargé en mémoire
- sens : int, 0 ou 1
"""
volume_array = np.rot90(volume_array,k=sens,axes=(0,1))
volume_array = np.rot90(volume_array,k=sens,axes=(2,0))
return volume_array
def affichage3D(volume, k, axis=0):
"""
affiche la coupe numéro k d'un volume, selon son axe axis
Parameters
----------
- volume : volume numpy chargé en mémoire
- k : int, numéro de coupe
- axis : int, 0 : axial ; 1 : coronal ; 2 : sag (dans le cas d'un volume chargé en axial)
"""
f = plt.figure()
if axis == 0:
image1 = volume[k,:,:]
if axis == 1:
image1 = volume[:,k,:]
if axis == 2:
image1 = volume[:,:,k]
plt.imshow(image1,cmap='gray')
plt.show()
return
def affichage2D(volume):
"""
affiche un plan numpy 2D
Parameters
----------
- volume : plan numpy chargé en mémoire, en 2 dimensions
"""
f = plt.figure()
image1 = volume
plt.imshow(image1,cmap='gray')
plt.show()
return
def AffichageMulti(volume, frequence, axis=0, FIGSIZE = 40):
"""
affiche toutes les coupes d'un volume selon l'axe axis, avec une frequence entre les coupes définie
Parameters
----------
- volume : volume numpy chargé en mémoire
- frequence : int, espace inter coupe (en voxels)
- axis : int, 0 : axial ; 1 : coronal ; 2 : sag (dans le cas d'un volume chargé en axial)
- FIGSIZE : taille des images pour l'affichage.
"""
coupes = np.shape(volume)[axis]
nb_images = coupes // frequence
fig=plt.figure(figsize=(FIGSIZE, FIGSIZE))
columns = 6
if nb_images % columns >0 :
rows = (nb_images // columns)+1
else :
rows = nb_images // columns
for i in range(nb_images):
i+=1
fig.add_subplot(rows, columns, i)
dix = frequence * i
if axis == 0:
plt.imshow(volume[dix,:,:], cmap='gray')
elif axis == 1:
plt.imshow(volume[:,dix,:], cmap='gray')
elif axis == 2:
plt.imshow(volume[:,:,dix], cmap='gray')
plt.show(block=True)
return
#___________________________________________________________________________________________
#___________________FONCTIONS POUR UTILISER LE RESEAU SUR UN VOLUME INTACT__________________
#___________________________________________________________________________________________
def NPY_to_DICOM (numpy=None,
mode="name",
csvpath=None,
dossier = "L3",
dirgeneral = r"C:\Users\alexa\OneDrive\Documents\ODIASP",
Center = 40,
Width = 400,
numerocoupe = None):
"""
---DEPRECATED---
Cette fonction sert à créer un fichier dicom à partir des infos contenues dans le CSV et en entrant un volume numpy en entrée.
On peut l'utiliser seule ou automatiquement dans la fonction FindL3
a noter :
Parameters
----------
- numpy : nom du numpy selon le csv. Optionnel si mode dossier.
- mode : "name" ou "dossier :
- en mode "name" : il faut nourrir l'argument "numpy" avec un string correspondant au nom d'un fichier .npy situé dans le
dossier "dossier"
- en mode "dossier" : l'arg "numpy" ne sert à rien, la fonction va scanner tout le dossier "dossier" et créer un fichier
dicom pour chaque numpy trouvé
- csv_path : chemin (complet) vers le csv où ont été enregistrées les metadatas de All-in-one
- dossier : sous-dossier dans lequel se trouvent les numpy (si mode "dossier")
- dirgeneral : Dossier de travail où ce situent les autres sous-dossier
- Center = 40 et Width = 400 : ne pas toucher. Correspondent aux réglages de contraste pour l'image de sortie
- numerocoupe : rentre l'information du numero de coupe
Notes
-----
En l'état cette fonction n'est pas utilisée par FindL3 dans All-in-one : elle n'est pas nécessaire car nous récupérons directement le fichier dicom d'origine.
Cette fonction pouvant s'avérer utile par ailleurs, nous la laissons donc en l'état.
"""
if mode=="name" :
if numpy==None:
            raise ValueError("the 'numpy' argument is required when mode=='name'")
PATHduNUMPY = os.path.join(os.path.join(dirgeneral,dossier),numpy)
VOLUME = np.load(PATHduNUMPY)
image2d = VOLUME.astype(np.uint16)
df=pandas.read_csv(csvpath, delimiter=",")
df.set_index("Name", inplace=True)
#Setting file meta information...
meta = pydicom.Dataset()
meta.MediaStorageSOPClassUID = pydicom._storage_sopclass_uids.MRImageStorage #<<<<<<<<<<<<<<<<<<<<<<<<<<<
meta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid()
meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
#remplissage des metadonnées du dicom
ds = Dataset()
ds.file_meta = meta
ds.is_little_endian = True
ds.is_implicit_VR = False
ds.SOPClassUID = pydicom._storage_sopclass_uids.MRImageStorage
ds.PatientName = "Test^Firstname"
ds.PatientID = "123456"
ds.Modality = "CT"
ds.SeriesInstanceUID = pydicom.uid.generate_uid()
ds.StudyInstanceUID = pydicom.uid.generate_uid()
ds.FrameOfReferenceUID = pydicom.uid.generate_uid()
ds.BitsStored = 16
ds.BitsAllocated = 16
ds.SamplesPerPixel = 1
ds.HighBit = 15
ds.ImagesInAcquisition = "1"
ds.Rows = image2d.shape[0]
ds.Columns = image2d.shape[1]
ds.InstanceNumber = 1
ds.ImagePositionPatient = r"0\0\1"
ds.ImageOrientationPatient = r"1\0\0\0\-1\0"
ds.ImageType = r"ORIGINAL\PRIMARY\AXIAL"
ds.RescaleIntercept = "0"
ds.RescaleSlope = "1"
if numerocoupe != None:
ds.InstanceNumber = int(numerocoupe)
#TaillePixel = str(df.at[numpy,"PixelSpacing"])[3:...] #DEBUG #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#ds.PixelSpacing = str(TaillePixel) + "\\" +str(TaillePixel) #DEBUG
ds.PhotometricInterpretation = "MONOCHROME2"
ds.PixelRepresentation = 1
ds.WindowCenter = Center
ds.WindowWidth = Width
pydicom.dataset.validate_file_meta(ds.file_meta, enforce_standard=True)
print("Setting pixel data... for ", numpy)
ds.PixelData = image2d.tobytes()
#enregistrement dans le dossier
os.chdir(os.path.join(dirgeneral,dossier))
ds.save_as(r"{0}.dcm".format(str(numpy)[0:-4]))
if mode=="dossier":
listfiles = os.listdir(os.path.join(dirgeneral,dossier))
for item in listfiles :
if str(item)[-4:] == ".npy":
NPY_to_DICOM (item, mode="name", csvpath=csvpath, dossier=dossier, dirgeneral=dirgeneral)
#___________________________________________________________________________________________
#___________________FONCTIONS POUR LA VERSION DATA AUGMENT DU RESEAU _______________________
#___________________________________________________________________________________________
def Norm_and_Scale_andCrop(VOLUME,downsample = 0.5,CUPY=True):
"""
prend un volume intact et commence à le traiter
permet de diminuer la taille des fichiers
"""
if CUPY== True:
cp.cuda.Device(0).use()
VOLUME = axial_to_sag(VOLUME)
#volume_array_gpu = cp.asarray(VOLUME)
#volume_array_gpu = cp.rot90(volume_array_gpu,1,axes=(0,1))
#volume_array_gpu = cp.rot90(volume_array_gpu,1,axes=(2,0))
hauteur = np.shape(VOLUME)[1]
correction =("ok",1,0)
if hauteur<384:
ratio =384/hauteur
volume_array_gpu = cp.asarray(VOLUME)
volume_array_gpu = MAGIC.zoom(volume_array_gpu, (1, ratio, 1))
VOLUME = cp.asnumpy(volume_array_gpu)
volume_array_gpu = None
correction = ("trop petit",hauteur, ratio)
if hauteur>384: #a noter que ceci n'est pas censé arriver, la fonction d'import limitant la taille a 384 !
VOLUME = VOLUME[:,-384:,:]
delta = hauteur-384.
correction = ("trop grand", hauteur, delta)
VOLUME = VOLUME[170:342,:,96:-32]
if downsample != 1 :
volume_array_gpu = cp.asarray(VOLUME)
volume_array_gpu = MAGIC.zoom(volume_array_gpu, (1, downsample, downsample))
VOLUME = cp.asnumpy(volume_array_gpu)
volume_array_gpu = None
VOLUMEnorm,a,b = normalize(VOLUME)
#Version CPU
else:
VOLUME = axial_to_sag(VOLUME)
hauteur = np.shape(VOLUME)[1]
correction =("ok",1,0)
if hauteur<384:
ratio =384/hauteur
VOLUME = zoom(VOLUME, (1, ratio, 1))
correction = ("trop petit",hauteur, ratio)
if hauteur>384: #a noter que ceci n'est pas censé arriver, la fonction d'import limitant la taille a 384 !
VOLUME = VOLUME[:,-384:,:]
delta = hauteur-384.
correction = ("trop grand", hauteur, delta)
        VOLUME = VOLUME[170:342,:,96:-32]
        if downsample != 1 :
            VOLUME = zoom(VOLUME, (1, downsample, downsample))
        VOLUMEnorm,a,b = normalize(VOLUME) #mirror the GPU branch so VOLUMEnorm is defined in the CPU path too
return VOLUMEnorm, correction,a,b, VOLUME
def FindCoupeMediane(volume, verbose =0):
x=0
while np.sum(volume[:,x:,:]) > np.sum(volume[:,:x,:]):
x+=1
if verbose >0 : affichage2D(volume[:,x,:])
if verbose >0 : affichage3D(volume, int(np.shape(volume)[0]/2), axis=0)
return x
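# Illustrative example on a synthetic mask (not project data): FindCoupeMediane returns the
# index along axis 1 where the mask "mass" is balanced between volume[:,:x,:] and volume[:,x:,:].
# toy = np.zeros((4, 10, 4)); toy[:, 3:7, :] = 1
# FindCoupeMediane(toy)   # -> 5, the middle of the non-zero band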
#________________________________________________________________________________________
def Find_L3 (name,
model,
NUMPY = "from_hardrive",
downsample =0.5,
csv_path = False,
dirgeneral = r"C:\\Users\\alexa\\OneDrive\\Documents\\ODIASP",
level = 40, window = 400, #
savepng=False,
savedcm=False,
nombredecoupesperduesSUP = 0, nombredecoupesperduesBAS = 0,
facteurAgrandissement = 1,
verbose = 2,
Compute_capacity = COMPUTE_CAPACITY,
CUPY=True
):
"""
Cette fonction prend un volume numpy et le rentre dans le réseau de neurones.
On peut appeler ce volume de deux facons :
- s'il est sauvegardé en rentrant son nom et en laissant NUMPY= "from_hardrive"
- s'il est en mémoire en donnant le nom voulu pour name (pas d'importance hormis pour la cohérence du .csv) et en nourrisant le volume numpy dans l'argument NUMPY.
Parameters
----------
- name : nom donné au volume (doit correspondre au csv)
- model : le model unet pour la segmentation de L3.
- NUMPY = "from_hardrive", par défaut le volume est chargé à partir des dossiers, sinon il suffit de nourrir ici un volumenumpy chargé en mémoire (c'est le cas dans la fonction all-in-one)
- csv_path : chemin (complet) vers le csv où ont été enregistrées les metadatas de All-in-one
- downsample : le downscaling qui a été utilisé pour le reseau de neurones. laisser à 0.5 si vous utilisez le reseau fourni.
- dirgeneral : Dossier de travail où ce situent les autres sous-dossier
- level = 40 et window = 400 : ne pas toucher. Correspondent aux réglages de contraste pour l'image de sortie
- nombredecoupesperduesSUP = 0, nombredecoupesperduesBAS = 0 : nombres de coupes NON chargées, utilisés pour calculer la position sur le scanner initial.
- facteurAgrandissement = 1 : zoom réalisé sur le volume chargé en amont.
- verbose : qt de verbose. 0 pas de verbose ; 1 : texte seulement ; 2 : texte et images (met le terminal en pause à chaque image)
- Compute_capacity = COMPUTE_CAPACITY : pour adapter automatiquement le calcul réalisé par le reseau en fonction de la capacité de l'ordinateur
Returns
-------
image : un volume numpy sans perte d'information correspondant à l'axial de L3
image_wl : une image, prête à être affichée (contraste pré-réglé mais avec perte d'information
positionrelle : les coordonnés selon l'axe z du centre de L3 (sur le scanner entier, incluant les coupes non chargées)
Par ailleurs, enregistre dans le csv les resultats et dans le dossier de sortie savepng les résultats en image png et savedcm en dicom
Notes
-----
La segmentation musculaire est accessoire à cette étape :
- pro : permet d'afficher les résultats à la volée, la sauvegarde des images intermédiaires (image sagittale etc) est accessoire
- con : plus lent que de segmenter par la suite avec ODIASP.PredictMUSCLES
"""
start = time.perf_counter()
#Verification que le dossier de sortie existe
if savepng != False :
savepng = DirVerification (savepng, DossierProjet=dirgeneral,verbose = 0)
if savedcm != False :
savedcm = DirVerification (savedcm, DossierProjet=dirgeneral,verbose = 0)
    if isinstance(NUMPY, str) and NUMPY == "from_hardrive":
        #the original test compared type(NUMPY) to the string, which could never match
        NUMPY = Reading_Hardrive (name, Class="Images")
#Traitement du volume pour qu'il soit accepté par le reseau de neurones.
if verbose>0:print("Adaptation du volume avant recherche de L3")
Model_import, correction,a,b, backup = Norm_and_Scale_andCrop(NUMPY,downsample=downsample,CUPY=CUPY) #enregistre la 'correction' réalisée dans le scaling
backup = backup + abs(np.min(backup))
backup = (backup/np.max(backup))*255
backup= backup.astype(np.uint8)
versionModel = Image.fromarray(backup[int(np.shape(backup)[0]/2),:,:])#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
Model_import = Model_import[:,:,:,np.newaxis]
if verbose>0:a=1 #au cas où verbose = 2 ce qui ne serait pas accepté par la Method predict
else:a=0
if verbose>0:print("Localisation de L3...")
AUTO_BATCH = int(Compute_capacity*1.3)
prediction = model.predict(Model_import, verbose =a, batch_size=AUTO_BATCH)
del Model_import
#calcul du centre de gravité du volume donné au réseau pour obtenir le centre de L3 (et correction de sa valeur pour correspondre au volume numpy donné en entrée
center = scipy.ndimage.center_of_mass(prediction, labels=None, index=None)#normal
#center_median = FindCoupeMediane(prediction) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#prediction[prediction<(np.max(prediction)/2)]=0 #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_bary_threshold = scipy.ndimage.center_of_mass(prediction, labels=None, index=None) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_median_threshold = FindCoupeMediane(prediction) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
upsample=1/downsample
position = center[1]*upsample
#center_median *= upsample#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_bary_threshold = center_bary_threshold[1] * upsample#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_median_threshold *= upsample#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
if correction[0]=="trop petit": #lit la correction
position=int(position/correction[2])
#center_median = int(center_median/correction[2]) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_bary_threshold = int(center_bary_threshold/correction[2]) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_median_threshold = int(center_median_threshold/correction[2])#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
elif correction[0]=="trop grand":
position=int(position+correction[2])
#center_median = int(center_median+correction[2]) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_bary_threshold = int(center_bary_threshold+correction[2]) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_median_threshold = int(center_median_threshold+correction[2])#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
else :
position = int(position)
if verbose>0:print("Axial position : "+str(position), "dans ce volume")
positionreelle = int((position*(1/facteurAgrandissement)) +nombredecoupesperduesSUP)
#center_median = int((center_median*(1/facteurAgrandissement)) +nombredecoupesperduesSUP) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_bary_threshold = int((center_bary_threshold*(1/facteurAgrandissement)) +nombredecoupesperduesSUP) #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#center_median_threshold = int((center_median_threshold*(1/facteurAgrandissement)) +nombredecoupesperduesSUP)#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
if verbose>0 and positionreelle != position : print("Axial position : ",positionreelle, " dans le volume initial")
#Standard_deviation = np.std(prediction)
#Certitude = (Standard_deviation*100)**6
#if verbose>0:print("Estimation de la confiance : ", Certitude)
NUMPY = np.asarray(NUMPY, dtype=np.float16)
image = NUMPY[position,:,:] #image axiale centrée sur le baricentre
image_wl=ApplyWindowLevel(level,window,image) #réglages de contraste
sagittal = NUMPY[:,:,int(center[0])+170] #image sag centrée sur le baricentre #sagittal = NUMPY[:,:,int(center[0])+128] #image sag centrée sur le baricentre
sagittal_wl=ApplyWindowLevel(level,window,sagittal) #réglages de contraste
sagittal_wl[position,:] = np.amax(NUMPY)/6 #crée la ligne horizontale sur l'image sag montrant la coupe axiale trouvée
sagittalPRED = prediction[int(center[0]),:,:]
sagittalPRED = sagittalPRED[:,:,0]
if correction[2] == 0:factueurdecorrection=1
else : factueurdecorrection=correction[2]
sagittalPRED = zoom(sagittalPRED, (1/(factueurdecorrection*downsample),1/downsample))
sagittalPRED *=255
mask_a_afficher =np.zeros(np.shape(sagittal_wl))
mask_a_afficher[:,96:-32] = sagittalPRED
mask_a_save = mask_a_afficher/np.max(mask_a_afficher)
#Gestion des problèmes de nom de fichier vs nom de dossier
if str(name)[-4:] == r".npy":
nameNPY=str(name)[-43:]
name__=str(name)[-43:-4]
else:
name__=str(name)[-39:]
nameNPY=str(name)[-39:]+r".npy"
if savepng != False:
#saving the axial image
arraytopng,_,_,_ = Norm0_1(image_wl)
arraytopng *=255
arraytopng= arraytopng.astype(np.uint8)
im = Image.fromarray(arraytopng)
im.save(os.path.join(savepng,name__)+r"_axial.png")
versionModel.save(os.path.join(savepng,name__)+r"VersionScaleModel.png") #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#saving the sagittal image
sagtopng,_,_,_ = Norm0_1(sagittal_wl+mask_a_save/6)#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
sagtopng *=255
sagtopng= sagtopng.astype(np.uint8)
im2 = Image.fromarray(sagtopng)
im2.save(os.path.join(savepng,name__)+r"_sag.png")
if savedcm != False:
#saving the dicom
NPY_to_DICOM (numpy=nameNPY, mode="name", csvpath=csv_path, dossier = savedcm, dirgeneral = dirgeneral,numerocoupe = positionreelle)
if verbose>1:
_, ax = plt.subplots(1,2,figsize=(25,25))
ax[0].imshow(image_wl, cmap='gray')
ax[1].imshow(sagittal_wl, cmap='gray')
ax[1].imshow(mask_a_afficher, cmap='magma', alpha=0.5)
plt.show()
if csv_path != False: #mettre a jour le csv avec pandas
end = time.perf_counter()
Timing = end-start
df=pandas.read_csv(csv_path, delimiter=",")
df.set_index("Name", inplace=True)
df.at[nameNPY,"L3Position"] = position
#df.at[nameNPY,"Standard_deviation"] = Standard_deviation
#df.at[nameNPY,"Certitude"] = Certitude
df.at[nameNPY,"L3Original"] = positionreelle
#df.at[nameNPY,"center_median"] = center_median
#df.at[nameNPY,"center_bary_threshold"] = center_bary_threshold
#df.at[nameNPY,"center_median_threshold"] = center_median_threshold
df.at[nameNPY,"DureeL3"] = Timing
df.to_csv(csv_path)
return image, image_wl, positionreelle
def All_in_One(dossierDICOM,
METADATAS,
csv_path,
MODEL_niveau_de_coupe,
Model_segmentation_L3,
Model_segmentation_muscles = None,
DIR_SORTIE = False,
VERBOSE = 2,
WINDOW_CENTER = 40,
WINDOW_WIDTH = 400,
DossierDeTravail = None,
CUPY = True
):
"""
Charge les examens depuis un dossier, trouve les images correspondant au scan abdopelvien puis segmente ce volume pour trouver L3.
On peut segmenter les muscles dans la même étape pour afficher le resultat en une seule fois mais ceci nécessite de charger le réseau de segmentation musculaire à chaque fois : cette méthode est plus consommatrice en ressources.
il est conseillé de ne pas segmenter les muscles à cette étape mais de le faire en une seule fois par la suite.
Parameters
----------
- dossierDICOM : Il s'agit du dossier d'import où se situent les images
- METADATAS : les informations que l'on veut sauvegarder parmi les métadonnées des DICOM
- csv_path : chemin (complet) vers le csv où ont été enregistrés, les metadatas de All-in-one
- MODEL_niveau_de_coupe : le modele de labelisation
- Model_segmentation_L3 : le modele de semgentation de L3
- Model_segmentation_muscles : le model (ODIASP) pour la segmentation musculaire, non obligatoire
- DIR_SORTIE : Dossier dans lequel seront sauvegardés les images de résultats (les résultats sont de toute facon enregistrés en format texte dans le csv
- VERBOSE : qt de verbose. 0 pas de verbose ; 1 : texte seulement ; 2 : texte et images (met le terminal en pause à chaque image)
- WINDOW_CENTER = 40 et WINDOW_WIDTH = 400 : ne pas toucher.
- DossierDeTravail : Dossier où seront enregistrées les infos, nécessaire si on utilise la segmentation musculaire par la suite. Optionnel si la segmentation musculaire est faire à la volée.
Returns
-------
Rien.
Mais enregistre dans le csv les resultats et dans le dossier de sortie DIR_SORTIE les résultats
Notes
-----
La segmentation musculaire est accessoire à cette étape :
- pro : permet d'afficher les résultats à la volée, la sauvegarde des images intermédiaires (image sagittale etc) est accessoire
- con : plus lent que de segmenter par la suite avec ODIASP.PredictMUSCLES
"""
#la fonction fast_scandir est récursive et crée une liste des sous dossiers que l'on trouve dans le DossierGeneral
dir_to_scan = fast_scandir(dossierDICOM)
nb_niv1 = 0
nb_niv2 = 0
i=1
for dirs in dir_to_scan:
print ("Chargement du dossier ", i,r"/",len(dir_to_scan)," : " + str(dirs))
i+=1
"""
Import du scanner, labelisation avec le premier reseau et mise a jour du csv
"""
        start_time = time.perf_counter() #TIME (time.clock was removed in Python 3.8)
volume_numpy, perduSUP, perduBAS, facteur, NOM = import_dicom_to_abdopelv(dirs,
metadata = METADATAS,
csv_path = csv_path,
save= False,
model = MODEL_niveau_de_coupe,
verbose = VERBOSE,
CUPY=CUPY)
        finImport_time = time.perf_counter() #TIME
        print(finImport_time - start_time, "seconds for the import")#TIME
if perduSUP == "Arret" : #export_dicomDir_to_numpyV2 renvoit la variable perdu = "Arret" si jamais elle s'est arrêtée seule
if facteur == "niveau1" :
nb_niv1 +=1
if facteur == "niveau2" :
nb_niv2 +=1
if VERBOSE >1 : print ("\n \n")
else :
"""
Transmission pour prediction de L3
"""
image, image_wl, position = Find_L3 (NOM,
model = Model_segmentation_L3,
NUMPY = volume_numpy,
downsample = 0.5,
csv_path = csv_path,
dirgeneral = DossierDeTravail,
level = WINDOW_CENTER, window = WINDOW_WIDTH,
savepng=DIR_SORTIE,
savedcm=False,
nombredecoupesperduesSUP = perduSUP,nombredecoupesperduesBAS = perduBAS,
facteurAgrandissement = facteur,
verbose = VERBOSE,
CUPY=CUPY)
            finL3 = time.perf_counter() #TIME
            print(finL3 - finImport_time, "seconds for the L3 segmentation")#TIME
"""
On recupere la postion de L3 pour aller chercher le fichier dicom d'origine qui lui correspond
"""
#il faut les trier dans l'ordre de la sequence de scanner (ce qui n'est pas l'ordre alphabetique du nom des fichiers)
inter = {}
list_files = os.listdir(dirs)
for f in list_files:
if not os.path.isdir(f):
f_long = os.path.join(dirs, f)
_ds_ = pydicom.dcmread(f_long,specific_tags =["ImagePositionPatient","SliceThickness"])
inter[f_long]=_ds_.ImagePositionPatient[2]
inter_sorted=sorted(inter.items(), key=lambda x: x[1], reverse=True)
liste_fichiers=[x[0] for x in inter_sorted]
dicom_file = pydicom.dcmread(liste_fichiers[position], force=True)
dicom_file.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian
img_orig_dcm = (dicom_file.pixel_array)
slope=float(dicom_file[0x28,0x1053].value)
intercept=float(dicom_file[0x28,0x1052].value)
img_modif_dcm=(img_orig_dcm*slope) + intercept
img_dcm_wl=ApplyWindowLevel(WINDOW_CENTER,WINDOW_WIDTH,img_modif_dcm)
if DIR_SORTIE != False :
DIR_SORTIE = DirVerification (DIR_SORTIE, DossierProjet=DossierDeTravail,verbose = 0)
name = str(NOM)[:-4]
name += "_" + str(liste_fichiers[position])[-8:]
namepng = name + "_FichierOrigine.png"
namedcm = name + "_FichierOrigine.dcm"
SAVEPATH = os.path.join(DIR_SORTIE,namepng)
im2 = Image.fromarray(img_dcm_wl)
im2 = im2.convert("L")
im2.save(SAVEPATH)
copy2(liste_fichiers[position], os.path.join(DIR_SORTIE,namedcm))
if Model_segmentation_muscles != None :
"""
On teste notre coupe L3 pour segmenter le muscle automatiquement
"""
if VERBOSE >0 :a=1
else: a=0
image_wl = image_wl[np.newaxis,:,:,np.newaxis]
imagepourreseau = image_wl/255
SEGMuscles = Model_segmentation_muscles.predict(imagepourreseau, verbose=a)
"""
Calcul de la surface segmentée
"""
pixelspacing=float(str(dicom_file[0x28,0x0030].value)[1:7])
mask = copy(img_modif_dcm) #Creation d'un masque pour ne garder que les pixels entre 29 et 150UH (cf litterature)
mask[mask > 150] = -1000
mask[mask >= -29] = 1
mask[mask < -29] = 0
SEGMuscles_masked = copy(SEGMuscles[0,:,:,0])
SEGMuscles_masked[SEGMuscles_masked <= 0.5] = 0
SEGMuscles_masked[SEGMuscles_masked > 0.5] = 1
SEGMuscles_masked = np.multiply(SEGMuscles_masked,mask)
surface_0 = np.sum(SEGMuscles_masked)
if VERBOSE >1 :
_, ax = plt.subplots(1,3,figsize=(25,25))
ax[0].imshow(image_wl[0,:,:,0], cmap='gray') #L'image provenant du numpy pour le calcul
ax[1].imshow(img_dcm_wl, cmap='gray')#L'image provenant du dicom chargé à nouveau
ax[1].imshow(SEGMuscles[0,:,:,0], cmap='magma', alpha=0.5)
ax[2].imshow(SEGMuscles_masked, cmap='Reds')
plt.show()
if DIR_SORTIE != False :
namemask = name + "_Mask.png"
SAVEPATHmask = os.path.join(DIR_SORTIE,namemask)
SEGMuscles_masked *=255
SEGMuscles_masked= SEGMuscles_masked.astype(np.uint8)
im_mask = Image.fromarray(SEGMuscles_masked)
im_mask.save(SAVEPATHmask)
if csv_path != False: #update the csv with pandas
df = pd.read_csv(csv_path, delimiter=",")
"""Prepare plots of statistical significance of features."""
import os
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import seaborn as sns
from sklearn.inspection import partial_dependence
from camcan.processing import permutation_importance
from camcan.utils import train_stacked_regressor
# common subjects 574
CV = 10
N_JOBS = 4
PANDAS_OUT_FILE = '../../data/age_prediction_exp_data.h5'
STRUCTURAL_DATA = '../../data/structural/structural_data.h5'
CONNECT_DATA_CORR = '../../data/connectivity/connect_data_correlation.h5'
CONNECT_DATA_TAN = '../../data/connectivity/connect_data_tangent.h5'
MEG_SOURCE_SPACE_DATA = '../../data/meg_source_space_data.h5'
FREQ_BANDS = ('alpha',
'beta_high',
'beta_low',
'delta',
'gamma_high',
'gamma_lo',
'gamma_mid',
'low',
'theta')
# store mae, learning curves for summary plots
regression_mae = pd.DataFrame(columns=range(0, CV), dtype=float)
regression_r2 = pd.DataFrame(columns=range(0, CV), dtype=float)
learning_curves = {}
# read information about subjects
subjects_data = pd.read_csv('../../data/participant_data.csv', index_col=0)
# for storing predictors data
subjects_predictions = pd.DataFrame(subjects_data.age,
index=subjects_data.index,
dtype=float)
# 595 subjects
meg_data = pd.read_hdf(MEG_SOURCE_SPACE_DATA, key='meg')
columns_to_exclude = ('band', 'fmax', 'fmin', 'subject')
parcellation_labels = [c for c in meg_data.columns if c
not in columns_to_exclude]
band_data = [meg_data[meg_data.band == bb].set_index('subject')[
parcellation_labels] for bb in FREQ_BANDS]
meg_data = pd.concat(band_data, axis=1, join='inner', sort=False)
meg_subjects = set(meg_data.index)
# read features
area_data = pd.read_hdf(STRUCTURAL_DATA, key='area')
thickness_data = pd.read_hdf(STRUCTURAL_DATA, key='thickness')
volume_data = pd.read_hdf(STRUCTURAL_DATA, key='volume')
area_data = area_data.dropna()
thickness_data = thickness_data.dropna()
volume_data = volume_data.dropna()
# take only subjects that are both in MEG and Structural MRI
structural_subjects = set(area_data.index)
common_subjects = meg_subjects.intersection(structural_subjects)
area_data = area_data.loc[common_subjects]
thickness_data = thickness_data.loc[common_subjects]
volume_data = volume_data.loc[common_subjects]
meg_data = meg_data.loc[common_subjects]
# read connectivity data
connect_data_tangent_basc = pd.read_hdf(CONNECT_DATA_TAN, key='basc197')
connect_data_r2z_basc = pd.read_hdf(CONNECT_DATA_CORR, key='basc197')
import sys
from sqlalchemy import create_engine
import pandas as pd
import pickle
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report
from sklearn.base import BaseEstimator, TransformerMixin
def load_data(database_filepath):
'''
Load dataframe from a database
'''
engine = create_engine('sqlite:///'+database_filepath)
df = pd.read_sql('SELECT * FROM message', engine)
X = df.message
y = df.iloc[:, 4:]
category_names = list(y.columns)
return X, y, category_names
def tokenize(text):
'''
Tokenize and lemmatize the text
'''
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
# tokenize and lemmatize every text, and save processed tokens into a list
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
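# For example (assuming the NLTK 'punkt' and 'wordnet' resources have been downloaded),
# tokenize("several buildings were damaged") returns lower-cased, lemmatized tokens,
# with plural nouns reduced to their lemma, e.g. 'buildings' -> 'building'.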
class TextLengthExtractor(BaseEstimator, TransformerMixin):
'''
A class to get the length of each tokenized text, and apply the function to all cells
'''
def textlength(self, text):
return len(tokenize(text))
def fit(self, x, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.textlength)
return pd.DataFrame(X_tagged)
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
'''
A class to check whether the first word of a text is a verb, and apply the function to all cells
'''
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, x, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
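# build_model() is not part of this excerpt. Below is a minimal sketch of how the two custom
# transformers above could be combined with the imported Pipeline/FeatureUnion, a multi-output
# random forest and a small grid search; the estimator choice and parameter grid are
# assumptions for illustration, not the original implementation.
def build_model():
    '''
    Build a machine-learning pipeline (illustrative sketch)
    '''
    pipeline = Pipeline([
        ('features', FeatureUnion([
            ('text_pipeline', Pipeline([
                ('vect', CountVectorizer(tokenizer=tokenize)),
                ('tfidf', TfidfTransformer())
            ])),
            ('text_length', TextLengthExtractor()),
            ('starting_verb', StartingVerbExtractor())
        ])),
        ('clf', MultiOutputClassifier(RandomForestClassifier()))
    ])
    # keep the grid small so GridSearchCV stays affordable; names follow the step labels above
    parameters = {'clf__estimator__n_estimators': [50, 100]}
    return GridSearchCV(pipeline, param_grid=parameters, cv=3)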
# Area and service code parser
import pandas as pd
import numpy as np
import re
import datetime
from .rq_class import *
from .search import findTag, parseAll
apinfo = ApiInfo('1a%2FLc1roxNrXp8QeIitbwvJdfpUYIFTcrbii4inJk3m%2BVpFvZSWjHFmOfWiH9T7TMbv07j5sDnJ5yefVDqHXfA%3D%3D', 'http://api.visitkorea.or.kr/openapi/service/rest/KorWithService/')
def getCode(obj):
result = {}
for i in range(0, len(obj)):
tmp = list(re.split('[<>]', str(obj[i])))
while '' in tmp: tmp.remove('')
result[findTag(tmp, 'name')] = findTag(tmp, 'code')
return result
def geoCode(apinfo):
# major categories
gcode_req = tourReq('ETC', 'AppTest', apinfo.mykey)
g_req = gcode_req.makeReq(apinfo.url, 'areaCode')
g1_dic = getCode(parseAll(g_req))
result_tmp=[]
for tags, codes in g1_dic.items():
input_tmp=[]
gcode_req2 = tourReq('ETC', 'AppTest', apinfo.mykey)
gcode_req2.addPara('areaCode', codes)
req2 = gcode_req2.makeReq(apinfo.url, 'areaCode')
g2_dic = getCode(parseAll(req2))
for tag2, code2 in g2_dic.items():
input_tmp.append(tags)
input_tmp.append(codes)
input_tmp.append(tag2)
input_tmp.append(code2)
# append the row as a list
result_tmp.append(input_tmp)
#
input_tmp=[]
df_geo = pd.DataFrame(result_tmp, columns=['city','city_code','sigungu','sigungu_code'])
# export to CSV
geo_code_date = datetime.datetime.today()
geo_code_date = geo_code_date.strftime('%Y-%m-%d')
df_geo.to_csv("resources/geo_code_{}.csv".format(geo_code_date), encoding='euc-kr')
def serviceCode(apinfo):
# major categories
scode_req = tourReq('ETC', 'AppTest', apinfo.mykey)
req_cat1 = scode_req.makeReq(apinfo.url, 'categoryCode')
cat1_dic = getCode(parseAll(req_cat1))
result_tmp=[]
for tags, codes in cat1_dic.items():
input_tmp = []
# middle categories
req2_tmp = tourReq('ETC', 'AppTest', apinfo.mykey)
req2_tmp.addPara('cat1', codes)
req2 = req2_tmp.makeReq(apinfo.url, 'categoryCode')
cat2_dic = getCode(parseAll(req2))
# minor categories
for tag2, code2 in cat2_dic.items():
req2_tmp.addPara('cat2', code2)
req3 = req2_tmp.makeReq(apinfo.url, 'categoryCode')
cat3_dic = getCode(parseAll(req3))
for tag3, code3 in cat3_dic.items():
input_tmp.append(tags)
input_tmp.append(codes)
input_tmp.append(tag2)
input_tmp.append(code2)
input_tmp.append(tag3)
input_tmp.append(code3)
# append the row as a list
result_tmp.append(input_tmp)
#
input_tmp = []
df_cat = pd.DataFrame(result_tmp, columns=['tag1','code1','tag2','code2','tag3','code3'])
import numpy as np
import pandas as pd
pd.set_option('display.expand_frame_repr', False)
from scipy.stats import mode
import warnings
import ntpath, pathlib
import collections
import copy
import itertools
import os
from .collect import Collect
# d6tcollect.init(__name__)
from .helpers import *
from .utils import PrintLogger
# ******************************************************************
# helpers
# ******************************************************************
def _dfconact(df):
return pd.concat(itertools.chain.from_iterable(df), sort=False, copy=False, join='inner', ignore_index=True)
def _direxists(fname, logger):
fdir = os.path.dirname(fname)
if fdir and not os.path.exists(fdir):
if logger:
logger.send_log('creating ' + fdir, 'ok')
os.makedirs(fdir)
return True
# ******************************************************************
# combiner
# ******************************************************************
class CombinerCSV(object, metaclass=Collect):
"""
Core combiner class. Sniffs columns, generates preview, combines aka stacks to various output formats.
Args:
fname_list (list): file names, eg ['a.csv','b.csv']
sep (string): CSV delimiter, see pandas.read_csv()
has_header (boolean): data has header row
nrows_preview (int): number of rows in preview
chunksize (int): number of rows to read into memory while processing, see pandas.read_csv()
read_csv_params (dict): additional parameters to pass to pandas.read_csv()
columns_select (list): list of column names to keep
columns_select_common (bool): keep only common columns. Use this instead of `columns_select`
columns_rename (dict): dict of columns to rename `{'name_old':'name_new'}
add_filename (bool): add filename column to output data frame. If `False`, will not add column.
apply_after_read (function): function to apply after reading each file. needs to return a dataframe
log (bool): send logs to logger
logger (object): logger object with `send_log()`
"""
def __init__(self, fname_list, sep=',', nrows_preview=3, chunksize=1e6, read_csv_params=None,
columns_select=None, columns_select_common=False, columns_rename=None, add_filename=True,
apply_after_read=None, log=True, logger=None):
if not fname_list:
raise ValueError("Filename list should not be empty")
self.fname_list = np.sort(fname_list)
self.nrows_preview = nrows_preview
self.read_csv_params = read_csv_params
if not self.read_csv_params:
self.read_csv_params = {}
if not 'sep' in self.read_csv_params:
self.read_csv_params['sep'] = sep
if not 'chunksize' in self.read_csv_params:
self.read_csv_params['chunksize'] = chunksize
self.logger = logger
if not logger and log:
self.logger = PrintLogger()
if not log:
self.logger = None
self.sniff_results = None
self.add_filename = add_filename
self.columns_select = columns_select
self.columns_select_common = columns_select_common
if columns_select and columns_select_common:
warnings.warn('columns_select will override columns_select_common, pick either one')
self.columns_rename = columns_rename
self._columns_reindex = None
self._columns_rename_dict = None
self.apply_after_read = apply_after_read
self.df_combine_preview = None
if self.columns_select:
if max(collections.Counter(columns_select).values())>1:
raise ValueError('Duplicate entries in columns_select')
def _read_csv_yield(self, fname, read_csv_params):
self._columns_reindex_available()
dfs = pd.read_csv(fname, **read_csv_params)
for dfc in dfs:
if self.columns_rename and self._columns_rename_dict[fname]:
dfc = dfc.rename(columns=self._columns_rename_dict[fname])
dfc = dfc.reindex(columns=self._columns_reindex)
if self.apply_after_read:
dfc = self.apply_after_read(dfc)
if self.add_filename:
dfc['filepath'] = fname
dfc['filename'] = ntpath.basename(fname)
yield dfc
def sniff_columns(self):
"""
Checks column consistency by reading top nrows in all files. It checks both presence and order of columns in all files
Returns:
dict: results dictionary with
files_columns (dict): dictionary with information, keys = filename, value = list of columns in file
columns_all (list): all columns in files
columns_common (list): only columns present in every file
is_all_equal (boolean): all files equal in all files?
df_columns_present (dataframe): which columns are present in which file?
df_columns_order (dataframe): where in the file is the column?
"""
if self.logger:
self.logger.send_log('sniffing columns', 'ok')
read_csv_params = copy.deepcopy(self.read_csv_params)
read_csv_params['dtype'] = str
read_csv_params['nrows'] = self.nrows_preview
read_csv_params['chunksize'] = None
# read nrows of every file
self.dfl_all = []
for fname in self.fname_list:
# todo: make sure no nrows param in self.read_csv_params
df = pd.read_csv(fname, **read_csv_params)
self.dfl_all.append(df)
# process columns
dfl_all_col = [df.columns.tolist() for df in self.dfl_all]
col_files = dict(zip(self.fname_list, dfl_all_col))
col_common = list_common(list(col_files.values()))
col_all = list_unique(list(col_files.values()))
# find index in column list so can check order is correct
df_col_present = {}
for iFileName, iFileCol in col_files.items():
df_col_present[iFileName] = [iCol in iFileCol for iCol in col_all]
df_col_present = pd.DataFrame(df_col_present, index=col_all)
import webbrowser
import numpy as np
import pandas as pd
import tax_utils as tut
from tax_calculator import TaxCalculator
class NorwegianTax(TaxCalculator):
"""
to facilitate easy input
add random text to trigger a code push...
"""
def __init__(self, salary=0, birth_year=1978, tax_year=None, gains_from_sale_fondskonto_share_comp=0, gains_from_sale_fondskonto_interest_comp=0, gains_from_sale_of_shares_ask=0, property_taxable_value=0, pension=0, pension_months=12, pension_percent=100, property_sale_proceeds=0, rental_income=0, property_sale_loss=0, bank_deposits=0,
bank_interest_income=0, interest_expenses=0, dividends=0, mutual_fund_dividends=0, gains_from_sale_of_shares=0, mutual_fund_interest_comp_profit=0, mutual_fund_interest_comp_profit_combi_fund=0, mutual_fund_share_comp_profit=0, mutual_fund_share_comp_profit_combi_fund=0, loss_fondskonto_shares=0, loss_fondskonto_interest=0, loss_ask_sale=0,
loss_from_sale_of_shares=0, loss_from_sale_mutual_fund_share_comp=0, loss_from_sale_mutual_fund_share_comp_combi_fund=0, loss_from_sale_mutual_fund_interest_comp=0,
loss_from_sale_mutual_fund_interest_comp_combi_fund=0, mutual_fund_wealth_share_comp=0, mutual_fund_wealth_interest_comp=0, wealth_in_shares=0, wealth_in_unlisted_shares=0, wealth_ask_cash=0, wealth_ask_shares=0, wealth_fondskonto_cash_interest=0, wealth_fondskonto_shares=0, municipality='0402', case_idx=None):
self._salary = salary
self._birth_year = birth_year
if tax_year is None:
tax_year = pd.to_datetime('today').year
tax_url = "https://skatteberegning.app.skatteetaten.no/%d" % tax_year
self._gains_from_sale_fondskonto_share_comp = gains_from_sale_fondskonto_share_comp
self._gains_from_sale_fondskonto_interest_comp = gains_from_sale_fondskonto_interest_comp
self._gains_from_sale_of_shares_ask = gains_from_sale_of_shares_ask
self._property_taxable_value = property_taxable_value
self._pension = pension
self._pension_months = pension_months
self._pension_percent = pension_percent
self._property_sale_proceeds = property_sale_proceeds
self._rental_income = rental_income
self._property_sale_loss = property_sale_loss
self._bank_deposits = bank_deposits
self._bank_interest_income = bank_interest_income
self._interest_expenses = interest_expenses
self._dividends = dividends
self._mutual_fund_dividends = mutual_fund_dividends
self._gains_from_sale_of_shares = gains_from_sale_of_shares
self._mutual_fund_interest_comp_profit = mutual_fund_interest_comp_profit
self._mutual_fund_interest_comp_profit_combi_fund = mutual_fund_interest_comp_profit_combi_fund
self._mutual_fund_share_comp_profit = mutual_fund_share_comp_profit
self._mutual_fund_share_comp_profit_combi_fund = mutual_fund_share_comp_profit_combi_fund
self._loss_fondskonto_shares = loss_fondskonto_shares
self._loss_fondskonto_interest = loss_fondskonto_interest
self._loss_ask_sale = loss_ask_sale
self._loss_from_sale_of_shares = loss_from_sale_of_shares
self._loss_from_sale_mutual_fund_share_comp = loss_from_sale_mutual_fund_share_comp
self._loss_from_sale_mutual_fund_share_comp_combi_fund = loss_from_sale_mutual_fund_share_comp_combi_fund
self._loss_from_sale_mutual_fund_interest_comp = loss_from_sale_mutual_fund_interest_comp
self._loss_from_sale_mutual_fund_interest_comp_combi_fund = loss_from_sale_mutual_fund_interest_comp_combi_fund
self._mutual_fund_wealth_share_comp = mutual_fund_wealth_share_comp
self._mutual_fund_wealth_interest_comp = mutual_fund_wealth_interest_comp
self._wealth_in_shares = wealth_in_shares
self._wealth_in_unlisted_shares = wealth_in_unlisted_shares
self._wealth_ask_cash = wealth_ask_cash
self._wealth_ask_shares = wealth_ask_shares
self._wealth_fondskonto_cash_interest = wealth_fondskonto_cash_interest
self._wealth_fondskonto_shares = wealth_fondskonto_shares
self._municipality = municipality
super().__init__(
jurisdiction='NOR',
tax_year=tax_year,
tax_url=tax_url,
case_idx=case_idx)
@staticmethod
def tax_payable(basis=0, rate=0, limit=0, deduction=0,
apply_rounding=False):
"""
convenient utility function
"""
retval = max(basis - deduction - limit, 0) * rate
if apply_rounding:
return tut.tax_round(retval)
return retval
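    # Worked example (illustrative figures only): with basis=300_000, rate=0.22 and
    # deduction=52_450, tax_payable() returns (300_000 - 52_450) * 0.22 = 54_461.0;
    # with apply_rounding=True the result is additionally passed through tut.tax_round().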
@property
def salary(self):
return self._salary
@salary.setter
def salary(self, value):
self._salary = value
@property
def municipality(self):
return self._municipality
@municipality.setter
def municipality(self, value):
self._municipality = value
@property
def birth_year(self):
return self._birth_year
@birth_year.setter
def birth_year(self, value):
self._birth_year = value
@property
def gains_from_sale_fondskonto_share_comp(self):
return self._gains_from_sale_fondskonto_share_comp
@gains_from_sale_fondskonto_share_comp.setter
def gains_from_sale_fondskonto_share_comp(self, value):
self._gains_from_sale_fondskonto_share_comp = value
@property
def gains_from_sale_fondskonto_interest_comp(self):
return self._gains_from_sale_fondskonto_interest_comp
@gains_from_sale_fondskonto_interest_comp.setter
def gains_from_sale_fondskonto_interest_comp(self, value):
self._gains_from_sale_fondskonto_interest_comp = value
@property
def gains_from_sale_of_shares_ask(self):
return self._gains_from_sale_of_shares_ask
@gains_from_sale_of_shares_ask.setter
def gains_from_sale_of_shares_ask(self, value):
self._gains_from_sale_of_shares_ask = value
@property
def property_taxable_value(self):
return self._property_taxable_value
@property_taxable_value.setter
def property_taxable_value(self, value):
self._property_taxable_value = value
@property
def pension(self):
return self._pension
@pension.setter
def pension(self, value):
self._pension = value
@property
def pension_months(self):
return self._pension_months
@pension_months.setter
def pension_months(self, value):
self._pension_months = value
@property
def pension_percent(self):
return self._pension_percent
@pension_percent.setter
def pension_percent(self, value):
self._pension_percent = value
@property
def property_sale_proceeds(self):
return self._property_sale_proceeds
@property_sale_proceeds.setter
def property_sale_proceeds(self, value):
self._property_sale_proceeds = value
@property
def rental_income(self):
return self._rental_income
@rental_income.setter
def rental_income(self, value):
self._rental_income = value
@property
def property_sale_loss(self):
return self._property_sale_loss
@property_sale_loss.setter
def property_sale_loss(self, value):
self._property_sale_loss = value
@property
def bank_deposits(self):
return self._bank_deposits
@bank_deposits.setter
def bank_deposits(self, value):
self._bank_deposits = value
@property
def bank_interest_income(self):
return self._bank_interest_income
@bank_interest_income.setter
def bank_interest_income(self, value):
self._bank_interest_income = value
@property
def interest_expenses(self):
return self._interest_expenses
@interest_expenses.setter
def interest_expenses(self, value):
self._interest_expenses = value
@property
def dividends(self):
return self._dividends
@dividends.setter
def dividends(self, value):
self._dividends = value
@property
def mutual_fund_dividends(self):
return self._mutual_fund_dividends
@mutual_fund_dividends.setter
def mutual_fund_dividends(self, value):
self._mutual_fund_dividends = value
@property
def gains_from_sale_of_shares(self):
return self._gains_from_sale_of_shares
@gains_from_sale_of_shares.setter
def gains_from_sale_of_shares(self, value):
self._gains_from_sale_of_shares = value
@property
def mutual_fund_interest_comp_profit(self):
return self._mutual_fund_interest_comp_profit
@mutual_fund_interest_comp_profit.setter
def mutual_fund_interest_comp_profit(self, value):
self._mutual_fund_interest_comp_profit = value
@property
def mutual_fund_interest_comp_profit_combi_fund(self):
return self._mutual_fund_interest_comp_profit_combi_fund
@mutual_fund_interest_comp_profit_combi_fund.setter
def mutual_fund_interest_comp_profit_combi_fund(self, value):
self._mutual_fund_interest_comp_profit_combi_fund = value
@property
def mutual_fund_share_comp_profit(self):
return self._mutual_fund_share_comp_profit
@mutual_fund_share_comp_profit.setter
def mutual_fund_share_comp_profit(self, value):
self._mutual_fund_share_comp_profit = value
@property
def mutual_fund_share_comp_profit_combi_fund(self):
return self._mutual_fund_share_comp_profit_combi_fund
@mutual_fund_share_comp_profit_combi_fund.setter
def mutual_fund_share_comp_profit_combi_fund(self, value):
self._mutual_fund_share_comp_profit_combi_fund = value
@property
def loss_fondskonto_shares(self):
return self._loss_fondskonto_shares
@loss_fondskonto_shares.setter
def loss_fondskonto_shares(self, value):
self._loss_fondskonto_shares = value
@property
def loss_fondskonto_interest(self):
return self._loss_fondskonto_interest
@loss_fondskonto_interest.setter
def loss_fondskonto_interest(self, value):
self._loss_fondskonto_interest = value
@property
def loss_ask_sale(self):
return self._loss_ask_sale
@loss_ask_sale.setter
def loss_ask_sale(self, value):
self._loss_ask_sale = value
@property
def loss_from_sale_of_shares(self):
return self._loss_from_sale_of_shares
@loss_from_sale_of_shares.setter
def loss_from_sale_of_shares(self, value):
self._loss_from_sale_of_shares = value
@property
def loss_from_sale_mutual_fund_share_comp(self):
return self._loss_from_sale_mutual_fund_share_comp
@loss_from_sale_mutual_fund_share_comp.setter
def loss_from_sale_mutual_fund_share_comp(self, value):
self._loss_from_sale_mutual_fund_share_comp = value
@property
def loss_from_sale_mutual_fund_share_comp_combi_fund(self):
return self._loss_from_sale_mutual_fund_share_comp_combi_fund
@loss_from_sale_mutual_fund_share_comp_combi_fund.setter
def loss_from_sale_mutual_fund_share_comp_combi_fund(self, value):
self._loss_from_sale_mutual_fund_share_comp_combi_fund = value
@property
def loss_from_sale_mutual_fund_interest_comp(self):
return self._loss_from_sale_mutual_fund_interest_comp
@loss_from_sale_mutual_fund_interest_comp.setter
def loss_from_sale_mutual_fund_interest_comp(self, value):
self._loss_from_sale_mutual_fund_interest_comp = value
@property
def loss_from_sale_mutual_fund_interest_comp_combi_fund(self):
return self._loss_from_sale_mutual_fund_interest_comp_combi_fund
@loss_from_sale_mutual_fund_interest_comp_combi_fund.setter
def loss_from_sale_mutual_fund_interest_comp_combi_fund(self, value):
self._loss_from_sale_mutual_fund_interest_comp_combi_fund = value
@property
def mutual_fund_wealth_share_comp(self):
return self._mutual_fund_wealth_share_comp
@mutual_fund_wealth_share_comp.setter
def mutual_fund_wealth_share_comp(self, value):
self._mutual_fund_wealth_share_comp = value
@property
def mutual_fund_wealth_interest_comp(self):
return self._mutual_fund_wealth_interest_comp
@mutual_fund_wealth_interest_comp.setter
def mutual_fund_wealth_interest_comp(self, value):
self._mutual_fund_wealth_interest_comp = value
@property
def wealth_in_shares(self):
return self._wealth_in_shares
@wealth_in_shares.setter
def wealth_in_shares(self, value):
self._wealth_in_shares = value
@property
def wealth_in_unlisted_shares(self):
return self._wealth_in_unlisted_shares
@wealth_in_unlisted_shares.setter
def wealth_in_unlisted_shares(self, value):
self._wealth_in_unlisted_shares = value
@property
def wealth_ask_cash(self):
return self._wealth_ask_cash
@wealth_ask_cash.setter
def wealth_ask_cash(self, value):
self._wealth_ask_cash = value
@property
def wealth_ask_shares(self):
return self._wealth_ask_shares
@wealth_ask_shares.setter
def wealth_ask_shares(self, value):
self._wealth_ask_shares = value
@property
def wealth_fondskonto_cash_interest(self):
return self._wealth_fondskonto_cash_interest
@wealth_fondskonto_cash_interest.setter
def wealth_fondskonto_cash_interest(self, value):
self._wealth_fondskonto_cash_interest = value
@property
def wealth_fondskonto_shares(self):
return self._wealth_fondskonto_shares
@wealth_fondskonto_shares.setter
def wealth_fondskonto_shares(self, value):
self._wealth_fondskonto_shares = value
@property
def share_related_income(self):
return self.gains_from_sale_fondskonto_share_comp + self.dividends + self.mutual_fund_dividends + self.gains_from_sale_of_shares + self.gains_from_sale_of_shares_ask + self.mutual_fund_share_comp_profit + \
self.mutual_fund_share_comp_profit_combi_fund - self.loss_fondskonto_shares - self.loss_from_sale_of_shares - \
self.loss_from_sale_mutual_fund_share_comp - \
self.loss_from_sale_mutual_fund_share_comp_combi_fund - self.loss_ask_sale
@property
def interest_related_income(self):
return self.gains_from_sale_fondskonto_interest_comp - self.interest_expenses + self.mutual_fund_interest_comp_profit + self.mutual_fund_interest_comp_profit_combi_fund + \
self.bank_interest_income - self.loss_fondskonto_interest - self.loss_from_sale_mutual_fund_interest_comp - \
self.loss_from_sale_mutual_fund_interest_comp_combi_fund
@property
def property_related_income(self):
return self.rental_income + self.property_sale_proceeds - self.property_sale_loss
@property
def non_pension_income(self):
return self.salary + self.share_related_income + \
self.interest_related_income + self.property_related_income
@property
def income_tax_basis(self):
if abs(self.non_pension_income) < 1e-4:
return max(self.pension - self.pension_only_minimum_deduction, 0)
if abs(self.pension) < 1e-4:
return max(self.salary - self.salary_only_minimum_deduction + self.parameter('share_income_grossup')
* self.share_related_income + self.interest_related_income + self.property_related_income, 0)
return max(self.salary + self.pension - self.pension_and_income_minimum_deduction + self.parameter('share_income_grossup')
* self.share_related_income + self.interest_related_income + self.property_related_income, 0)
@property
def state_wealth_tax_basis(self):
return (self.mutual_fund_wealth_share_comp + self.wealth_in_shares + self.wealth_in_unlisted_shares + self.wealth_ask_shares + self.wealth_fondskonto_shares) * \
self.parameter('percentage_of_taxable_wealth_cap_shares') + self.bank_deposits + self.property_taxable_value + \
self.mutual_fund_wealth_interest_comp + \
self.wealth_fondskonto_cash_interest + self.wealth_ask_cash
@property
def pension_deduction_raw(self):
return max(min(self.pension * self.parameter('pension_deduction_multiplier'),
self.parameter('max_pension_deduction')), self.parameter('min_pension_deduction'))
@property
def income_deduction_raw(self):
return max(min(self.salary * self.parameter('deduction_multiplier'),
self.parameter('max_deduction_limit')), self.parameter('min_deduction_limit'))
@property
def pension_and_income_minimum_deduction(self):
"""
does what it says
"""
if (abs(self.pension) < 1e-4) and (abs(self.salary) < 1e-4):
return 0
# you can't deduct more than what you earn:
income_deduction = min(self.income_deduction_raw, self.salary)
combo_deduction = self.pension_deduction_raw + \
max(min(self.salary * self.parameter('deduction_multiplier'), self.parameter('max_deduction_limit')),
min(self.parameter('min_pension_deduction'), self.salary, self.pension))
return min(max(income_deduction, combo_deduction),
self.parameter('max_deduction_limit'))
@property
def salary_only_minimum_deduction(self):
"""
this could be read from db, of course
https://www.skatteetaten.no/en/rates/minimum-standard-deduction/
"""
return int(min(self.income_deduction_raw, self.salary))
@property
def pension_only_minimum_deduction(self):
"""
does what it says
"""
return min(self.pension_deduction_raw, self.pension)
@property
def bracket_tax(self):
"""
Calculates the bracket tax
"""
tot_inc = self.salary + self.pension
if tot_inc <= self.parameter('trinnskatt_l1'):
return 0
if self.parameter('trinnskatt_l1') < tot_inc <= self.parameter(
'trinnskatt_l2'):
return tut.tax_round(self.parameter(
'trinnskatt_r1') * (tot_inc - self.parameter('trinnskatt_l1')))
if self.parameter('trinnskatt_l2') < tot_inc <= self.parameter(
'trinnskatt_l3'):
return tut.tax_round(self.parameter('trinnskatt_r2') * (tot_inc - self.parameter('trinnskatt_l2')) +
self.parameter('trinnskatt_r1') * (self.parameter('trinnskatt_l2') - self.parameter('trinnskatt_l1')))
if self.parameter('trinnskatt_l3') < tot_inc <= self.parameter(
'trinnskatt_l4'):
return tut.tax_round(self.parameter('trinnskatt_r3') * (tot_inc - self.parameter('trinnskatt_l3')) + self.parameter('trinnskatt_r2') *
(self.parameter('trinnskatt_l3') - self.parameter('trinnskatt_l2')) + self.parameter('trinnskatt_r1') * (self.parameter('trinnskatt_l2') - self.parameter('trinnskatt_l1')))
return tut.tax_round(self.parameter('trinnskatt_r4') * (tot_inc - self.parameter('trinnskatt_l4')) + self.parameter('trinnskatt_r3') * (self.parameter('trinnskatt_l4') -
self.parameter('trinnskatt_l3')) + self.parameter('trinnskatt_r2') * (self.parameter('trinnskatt_l3') - self.parameter('trinnskatt_l2')) + self.parameter('trinnskatt_r1') * (self.parameter('trinnskatt_l2') - self.parameter('trinnskatt_l1')))
@property
def age(self):
return pd.to_datetime('today').year - self.birth_year
@property
def bracket_tax_level(self):
"""
for debugging the excel sheet
"""
tot_inc = self.salary + self.pension
if tot_inc <= self.parameter('trinnskatt_l1'):
return "Below level 1"
if self.parameter('trinnskatt_l1') < tot_inc <= self.parameter(
'trinnskatt_l2'):
return "Between level 1 and 2"
if self.parameter('trinnskatt_l2') < tot_inc <= self.parameter(
'trinnskatt_l3'):
return "Between lebel 2 and 3"
if self.parameter('trinnskatt_l3') < tot_inc <= self.parameter(
'trinnskatt_l4'):
return "Between level 3 and 4"
return "Above level 4"
@property
def attribute_map(self):
karta = {'salary': '2.1.1', 'pension': '2.2.1', 'bank_interest_income': '3.1.1',
'interest_expenses': '3.3.1', 'property_taxable_value': '4.3.2', 'property_sale_proceeds': '2.8.4',
'property_sale_loss': '3.3.6', 'rental_income': '2.8.2', 'bank_deposits': '4.1.1',
'gains_from_sale_fondskonto_share_comp': '3.1.4', 'gains_from_sale_fondskonto_interest_comp': '3.1.4',
'dividends': '3.1.5', 'mutual_fund_dividends': '3.1.6', 'gains_from_sale_of_shares': '3.1.8',
'gains_from_sale_of_shares_ask': '3.1.8'}
for field in ['mutual_fund_interest_comp_profit', 'mutual_fund_interest_comp_profit_combi_fund',
'mutual_fund_share_comp_profit', 'mutual_fund_share_comp_profit_combi_fund']:
karta[field] = '3.1.9'
for field in ['loss_fondskonto_shares', 'loss_fondskonto_interest']:
karta[field] = '3.3.7'
for field in ['loss_ask_sale', 'loss_from_sale_of_shares']:
karta[field] = '3.3.8'
for field in ['loss_from_sale_mutual_fund_share_comp', 'loss_from_sale_mutual_fund_share_comp_combi_fund',
'loss_from_sale_mutual_fund_interest_comp', 'loss_from_sale_mutual_fund_interest_comp_combi_fund']:
karta[field] = '3.3.9'
karta['mutual_fund_wealth_share_comp'] = '4.1.4'
karta['mutual_fund_wealth_interest_comp'] = '4.1.5'
karta['wealth_in_shares'] = '4.1.7'
for field in ['wealth_in_unlisted_shares',
'wealth_ask_cash', 'wealth_ask_shares']:
karta[field] = '4.1.8'
for field in ['wealth_fondskonto_cash_interest',
'wealth_fondskonto_shares']:
karta[field] = '4.5.2'
return karta
def compare_calculated_tax_vs_correct_tax(
self, atol=1e-8, rtol=1e-6, check_basis=False):
"""
compares the config vs our calculations
It gives a more detailed breakdown so you can compare the components such as different basis etc.
"""
df_calc = self.tax_breakdown()
df_true = self.parsed_official_response()
out = []
elements = [['Formueskatt stat', 'formueskattTilStat'], ['Formueskatt kommune', 'formueskattTilKommune'],
['Inntektsskatt til kommune', 'inntektsskattTilKommune'], [
'Inntektsskatt til fylkeskommune', 'inntektsskattTilFylkeskommune'],
['Fellesskatt', 'fellesskatt'], ['Trinnskatt', 'trinnskatt'], ['Trygdeavgift', 'sumTrygdeavgift'], ['Sum skattefradrag', 'Sum skattefradrag']]
for calc_comp, correct_comp in elements:
tax_calc = df_calc.query("Skatt == '%s'" % calc_comp)
tax_calc_basis = tax_calc['Grunnlag'].item()
tax_calc_value = tut.tax_round(tax_calc['Beloep'].item())
# pdb.set_trace()
# if 'Inntekt' in calc_comp:
# pdb.set_trace()
tax_correct = df_true.query("tax_type == '%s'" % correct_comp)
if tax_correct.empty:
tax_correct_basis = 0
else:
tax_correct_basis = tax_correct['tax_basis'].item()
if 'skattefradrag' in calc_comp:
tax_calc_value *= -1
if not tax_correct.empty:
tax_correct_value = tax_correct['tax'].item()
else:
tax_correct_value = 0
error_basis = np.abs(tax_calc_basis - tax_correct_basis)
tol_basis = atol + rtol * np.abs(tax_correct_basis)
error_value = np.abs(tax_calc_value - tax_correct_value)
tol_value = atol + rtol * np.abs(tax_correct_value)
basis_pass = (error_basis <= tol_basis)
value_pass = (error_value <= tol_value)
if check_basis:
test_string = ''
if basis_pass and value_pass:
test_string = '++'
elif basis_pass and not value_pass:
test_string = '+-'
elif value_pass and not basis_pass:
test_string = '-+'
else:
test_string = '--'
else:
test_string = '+' if value_pass else '-'
if check_basis:
out.append([calc_comp,
tax_calc_basis,
tax_correct_basis,
tax_calc_value,
tax_correct_value,
error_basis,
error_value,
tol_basis,
tol_value,
test_string])
else:
out.append([calc_comp, tax_calc_value, tax_correct_value,
error_value, tol_value, test_string])
# check total tax:
# pdb.set_trace()
total_calculated_tax = df_calc.query(
"Skatt == 'Din Skatt'").Beloep.item()
total_tax = df_true.query("tax_type == 'total'").tax.item()
error_value = np.abs(total_calculated_tax - total_tax)
tol_value = atol + rtol * np.abs(total_tax)
basis_pass = True
value_pass = (error_value <= tol_value)
if check_basis:
test_string = '+' + ('+' if value_pass else '-')
out.append(['Total skatt', np.nan, np.nan, total_calculated_tax,
total_tax, 0, error_value, 0, tol_value, test_string])
else:
test_string = '+' if value_pass else '-'
out.append(['Total skatt', total_calculated_tax,
total_tax, error_value, tol_value, test_string])
if check_basis:
return pd.DataFrame(out, columns=['component', 'basis_calc', 'basis_corr', 'value_calc',
'value_corr', 'basis_error', 'value_error', 'basis_tol', 'value_tol', 'test_pass'])
return pd.DataFrame(out, columns=[
'component', 'value_calc', 'value_corr', 'value_error', 'value_tol', 'test_pass'])
def state_wealth_tax(self):
return self.tax_payable(basis=self.state_wealth_tax_basis, rate=self.parameter(
'state_wealth_tax_rate'), limit=self.parameter('wealth_tax_lower_limit'))
def municipal_wealth_tax(self):
return self.tax_payable(basis=self.state_wealth_tax_basis, rate=self.parameter(
'municipal_wealth_tax_rate'), limit=self.parameter('wealth_tax_lower_limit'))
def explain_attribute(self, attr='pension'):
assert attr in self.attribute_map, "'%s' is not a valid attribute!" % attr
return self.explain_tax_item(self.attribute_map[attr])
def explain_tax_item(self, item_no='3.1.8'):
"""
just show the web-page for the item
"""
text = item_no.replace('.', '/')
url = "https://www.skatteetaten.no/person/skatt/skattemelding/finn-post/%s" % text
return webbrowser.open(url)
def _income_tax(self, basis_name='felles', apply_rounding=True):
"""
some gentle indirection
"""
if basis_name == 'felles':
return self.tax_payable(basis=self.income_tax_basis, rate=self.parameter('felles_tax_rate'),
deduction=self.parameter('personal_deduction'), apply_rounding=apply_rounding)
if basis_name == 'fylke':
return self.tax_payable(basis=self.income_tax_basis, rate=self.parameter('fylke_tax_rate'),
deduction=self.parameter('personal_deduction'), apply_rounding=apply_rounding)
if basis_name == 'kommun':
return self.tax_payable(basis=self.income_tax_basis, rate=self.parameter('municipal_tax_rate'),
deduction=self.parameter('personal_deduction'), apply_rounding=apply_rounding)
raise Exception(
"We only basis in {felles, fylke, kommun}, not '%s'" %
basis_name)
def municipal_income_tax(self):
return self._income_tax(basis_name='kommun')
def common_income_tax(self):
return self._income_tax(basis_name='felles')
def county_income_tax(self):
return self._income_tax(basis_name='fylke')
def national_insurance(self, apply_rounding=False):
"""
[NO] National insurance contribution (trygdeavgift): roughly 8.2% of salary (and a lower rate on pension)
above the lower income limit, but never more than 25% of the income exceeding that limit
(the cap is not mentioned on the official site).
https://no.wikipedia.org/wiki/Trygdeavgift
"""
rate = self.parameter('trygde_rate')
if self.age > self.parameter('trygde_rate_cutoff_age_hi') or self.age < self.parameter(
'trygde_rate_cutoff_age_lo'):
rate = self.parameter('trygde_rate_extreme')
income = self.salary + self.pension
if income <= self.parameter('trygde_income_limit'):
return 0
overshooting_income = income - self.parameter('trygde_income_limit')
# share_of_overshooting =
raw_tax = rate * self.salary + \
self.parameter('trygde_rate_pension') * self.pension
if raw_tax >= self.parameter('trygde_max_share') * overshooting_income:
ans = self.parameter('trygde_max_share') * overshooting_income
if apply_rounding:
return tut.tax_round(ans)
return ans
if apply_rounding:
return tut.tax_round(raw_tax)
return raw_tax
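    # Illustrative example of the 25% cap (made-up parameter values, not the statutory rates):
    # with trygde_income_limit=64_650, trygde_rate=0.082 and a salary of 70_000, the raw tax
    # would be 0.082 * 70_000 = 5_740, but the cap limits it to 0.25 * (70_000 - 64_650) = 1_337.50,
    # which is what the method returns in that case.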
def deduction(self):
if self.pension > 0:
max_ded = self.parameter(
'max_pension_tax_deduction') * (self.pension_percent / 100) * (self.pension_months / 12)
# pdb.set_trace()
reduction_stage1 = self.parameter(
'pension_deduction_stage_one_threshold') * (self.pension_percent / 100) * (self.pension_months / 12)
red_rate1 = self.parameter(
'stage_one_reduction_rate')
reduction_stage2 = self.parameter(
'pension_deduction_stage_two_threshold') * (self.pension_percent / 100) * (self.pension_months / 12)
red_rate2 = self.parameter(
'stage_two_reduction_rate')
cutoff = min(np.round(max_ded -
((min(self.pension, reduction_stage2) -
reduction_stage1) *
red_rate1 +
max((self.pension -
reduction_stage2) *
red_rate2, 0)), 0), max_ded)
deductions = self.municipal_income_tax() + self.county_income_tax() + \
self.common_income_tax() + self.bracket_tax + self.national_insurance()
return max(min(cutoff, deductions), 0)
return 0
def tax(self, apply_rounding=True):
if apply_rounding:
return tut.tax_round(self.state_wealth_tax()) + tut.tax_round(self.municipal_wealth_tax()) + tut.tax_round(self.municipal_income_tax()) + tut.tax_round(
self.county_income_tax()) + tut.tax_round(self.common_income_tax()) + tut.tax_round(self.bracket_tax) + tut.tax_round(self.national_insurance()) - tut.tax_round(self.deduction())
return self.state_wealth_tax() + self.municipal_wealth_tax() + self.municipal_income_tax() + self.county_income_tax() + \
self.common_income_tax() + self.bracket_tax + \
self.national_insurance() - self.deduction()
def tax_breakdown(self):
out = [['Formueskatt stat', self.state_wealth_tax_basis, self.state_wealth_tax()], [
'Formueskatt kommune', self.state_wealth_tax_basis, self.municipal_wealth_tax()]]
out += [['Inntektsskatt til kommune', self.income_tax_basis, self.municipal_income_tax()], ['Inntektsskatt til fylkeskommune', self.income_tax_basis, self.county_income_tax()],
['Fellesskatt', self.income_tax_basis, self.common_income_tax()], ['Trinnskatt', self.salary + self.pension, self.bracket_tax], ['Trygdeavgift', self.salary + self.pension, self.national_insurance()]]
out += [['Sum skattefradrag', 0, self.deduction()]]
# if apply_rounding:
out += [['Din Skatt', np.nan, self.tax()]]
return pd.DataFrame(out, columns=['Skatt', 'Grunnlag', 'Beloep'])
def display_online_breakdown(self, refresh=False, session=None):
"""
this is the online ('true') figures
"""
frame = self.parsed_official_response()
orig_df = frame.copy()
frame.tax = frame.tax.map(tut.big_fmt)
frame.tax_basis = frame.tax_basis.map(tut.big_fmt)
for col in ['tax_basis', 'tax']:
frame.loc[1, col] = ''
tut.display_df(frame)
return orig_df
def parsed_official_response(self, refresh=False, session=None):
resp = self.query_web_for_tax_results(refresh=refresh, session=session)
# pdb.set_trace()
raw = resp.json()
data = raw['hovedperson']['beregnetSkattV3']['skattOgAvgift']
correct_tax = raw['hovedperson']['beregnetSkattV3']['informasjonTilSkattelister']['beregnetSkatt']
# if we don't have any wealth, we won't have those wealth tax keys in
# data, seems we don't have deduction keys either?
items = [
'inntektsskattTilKommune',
'inntektsskattTilFylkeskommune',
'fellesskatt',
'trinnskatt',
'sumTrygdeavgift',
'formueskattTilStat',
'formueskattTilKommune']
missed_items = list(set(data.keys()) - set(items))
deductions = raw['hovedperson']['beregnetSkattV3'].get(
'sumSkattefradrag', {}).get('beloep', 0)
if len(missed_items) > 0:
print("Did not use these items: %s" % (','.join(missed_items)))
# pdb.set_trace()
not_in_res = list(set(items) - set(data.keys()))
if len(not_in_res) > 0:
print(
"Did not find these items in response: %s" %
(','.join(not_in_res)))
out = [['Salary', self.salary, 0], ['', 0, 0]]
for item in items:
try:
out.append(
[item, data[item]['grunnlag'], data[item]['beloep']])
except Exception:
# print(err.msg)
out.append([item, 0, 0])
if deductions != 0:
out.append(['Sum skattefradrag', 0, -deductions])
frame = pd.DataFrame(out, columns=['tax_type', 'tax_basis', 'tax'])
return frame
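# Minimal usage sketch (hypothetical figures; assumes the parameter tables for the chosen
# tax year are available through the TaxCalculator base class):
#
#     nt = NorwegianTax(salary=550_000, birth_year=1978, tax_year=2021,
#                       bank_deposits=200_000, bank_interest_income=1_500)
#     print(nt.tax())            # total tax, rounded
#     print(nt.tax_breakdown())  # DataFrame with per-component basis and amounts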
"""Get Names from Vornam Koeln.
Sources:
- Official Source: https://offenedaten-koeln.de/
- Download Samples: https://github.com/fxnn/vornamen
"""
import pandas as pd
urls = [
"https://offenedaten-koeln.de/sites/default/files/Vornamen_Koeln_2017.csv",
"https://offenedaten-koeln.de/sites/default/files/Vornamen_Koeln_2016.csv",
"https://offenedaten-koeln.de/sites/default/files/Vornamen_Koeln_2015.csv",
"https://offenedaten-koeln.de/sites/default/files/Vornamensstatistik_2014_0.csv",
"https://offenedaten-koeln.de/sites/default/files/Vornamen_Koeln_2013.csv",
"https://offenedaten-koeln.de/sites/default/files/Vornamen_Koeln_2012.csv",
"https://offenedaten-koeln.de/sites/default/files/Vornamen_Koeln_2011.csv",
"https://offenedaten-koeln.de/sites/default/files/Vornamen_Koeln_2010.csv",
]
names_dfs = []
for url in urls:
# The files aren't consistent with their delimiter selection
# Some use comma & others a semi-colon
df = pd.read_csv(url, sep=";")
if df.shape[1] == 1:
df = pd.read_csv(url, sep=",")
# Translation mapping from German to English
colnames_dict = {
"vorname": "first_name",
"geschlecht": "gender",
}
# Select the desired columns & translate
df = df[list(colnames_dict.keys())]
df.rename(columns=colnames_dict, inplace=True)
names_dfs.append(df)
# Combine all
names_df = pd.concat(names_dfs)
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tool runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
'their dataset and whether they can accept an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
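# The sketch below is illustrative only (it is not called anywhere in this app) and shows how a
# dcc.Upload payload could be fed through parse_contents above: Dash supplies the upload as a
# "data:<mime>;base64,<payload>" string, which parse_contents splits and decodes. The sample CSV
# content and the _example_ name are assumptions made purely for demonstration.
def _example_parse_contents_usage():
    import base64
    sample_csv = "Sample,Feature1,Feature2\nA,1.0,2.0\nB,3.0,4.0\n"
    encoded = base64.b64encode(sample_csv.encode('utf-8')).decode('utf-8')
    contents = "data:text/csv;base64," + encoded
    # Returns a two-row DataFrame with the columns Sample, Feature1 and Feature2
    return parse_contents(contents, 'example.csv')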
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
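# Minimal sketch (illustrative assumption, not used by the callback above) of the JSON round trip
# that caches the uploaded table in dcc.Store: serialise with DataFrame.to_json(orient='split')
# and restore it later with pd.read_json(orient='split'), as the plotting callbacks below do.
def _example_store_roundtrip():
    import pandas as pd
    original = pd.DataFrame({'Sample': ['A', 'B'], 'Feature1': [1.0, 3.0]})
    cached = original.to_json(date_format='iso', orient='split')   # what goes into dcc.Store
    restored = pd.read_json(cached, orient='split')                # what the callbacks read back
    return restored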
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
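# Illustrative sketch (not wired into any callback; the helper name is an assumption) contrasting
# the two matrix choices offered throughout this app: standardising the features first is
# equivalent to PCA on the correlation matrix, while running PCA on the raw values corresponds to
# the covariance matrix. Both branches return the cumulative explained variance in percent.
def _example_cumulative_variance(dff_numeric):
    import numpy as np
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler
    x_raw = dff_numeric.values                              # covariance-matrix PCA input
    x_std = StandardScaler().fit_transform(x_raw)           # correlation-matrix PCA input
    cum_covar = np.cumsum(PCA().fit(x_raw).explained_variance_ratio_) * 100
    cum_corr = np.cumsum(PCA().fit(x_std).explained_variance_ratio_) * 100
    return cum_corr, cum_covar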
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
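# Illustrative sketch (standalone helper, an assumption for demonstration; the callbacks above
# inline this logic) of the outlier rule used throughout this app: keep only the rows whose
# features all lie within three standard deviations of the column mean (|z-score| < 3).
def _example_remove_outliers(dff_numeric):
    import numpy as np
    import scipy.stats
    z_scores = scipy.stats.zscore(dff_numeric)
    keep_rows = (np.abs(z_scores) < 3).all(axis=1)
    return dff_numeric[keep_rows]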
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
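# Example usage of the two rounding helpers above (values are illustrative):
#   round_up(0.123, 2)   -> 0.13
#   round_down(0.129, 2) -> 0.12
# They are used below to report the colour range of the loadings heatmap.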
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of the two principal components
    # print(pca.explained_variance_ratio_)
    # Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature in principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=features_outlier2, y=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])],
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell represents the loading, i.e. the correlation between a feature and a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
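# Minimal sketch (illustrative only; the callback above inlines this computation) of the loading
# matrix behind the heatmap: the eigenvectors scaled by the square root of the explained
# variance, pca.components_.T * sqrt(pca.explained_variance_). With standardised inputs each
# entry approximates the correlation between a feature and a principal component.
def _example_loading_matrix(dff_numeric):
    import numpy as np
    import pandas as pd
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler
    x = StandardScaler().fit_transform(dff_numeric.values)
    pca = PCA(n_components=dff_numeric.shape[1]).fit(x)
    loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
    return pd.DataFrame(loadings, index=dff_numeric.columns,
                        columns=['PC' + str(i + 1) for i in range(loadings.shape[1])])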
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell represents the coefficient of determination (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
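# Illustrative sketch (not called by the callback above, which inlines the same computation) of
# the data behind the feature correlation heatmap: the pairwise Pearson correlation matrix,
# squared element-wise to give the coefficient of determination (R²) between every pair of
# features.
def _example_r_squared_matrix(dff_numeric):
    correlation = dff_numeric.corr(method='pearson')
    return correlation * correlation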
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
        # PCA on standardised data (equivalent to using the correlation matrix)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
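# Loadings are the unit eigenvectors (pca.components_) scaled by the square root of
# their eigenvalues; for standardized inputs these equal the correlations between
# each original variable and the principal components.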
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
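# Stacking the loadings on top of a matching block of zeros gives each variable two
# rows (its loading and the origin); grouped by 'line_group', each pair is later
# drawn as a segment from (0, 0) to (PC1, PC2), i.e. a loading vector.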
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX (unscaled input data)
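# Covariance-matrix PCA: the features are deliberately left unscaled here, so the
# decomposition is driven by the raw variances rather than by correlations.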
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH OUTLIERS REMOVED
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# PCA on standardized data (i.e. the correlation matrix)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# select dataset and explained variance according to the outlier and matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
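# Build an N-step light-to-dark blue gradient, one shade per distinct cos2 value, so
# that better-represented variables are drawn in darker blue.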
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
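# trace2_all is a dummy, fully transparent marker trace whose only purpose is to
# attach a colorbar to the figure, since line traces cannot carry one themselves.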
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
# after computing the summed contribution (used for the colorscale), keep it alongside PC1 and PC2 in a separate dataframe
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH OUTLIERS REMOVED
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
# after computing the summed contribution (used for the colorscale), keep it alongside PC1 and PC2 in a separate dataframe
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
# after computing the summed contribution (used for the colorscale), keep it alongside PC1 and PC2 in a separate dataframe
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_dataf_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_outlier_scale_dff_covar = pd.concat(
[zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='contrib')
# select dataset and explained variance according to the outlier and matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0),
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["PC1_cos2"] = loading_scale_input_df["PC1"] ** 2
loading_scale_input_df["PC2_cos2"] = loading_scale_input_df["PC2"] ** 2
loading_scale_input_df["PC1_contrib"] = \
(loading_scale_input_df["PC1_cos2"] * 100) / (loading_scale_input_df["PC1_cos2"].sum(axis=0))
loading_scale_input_df["PC2_contrib"] = \
(loading_scale_input_df["PC2_cos2"] * 100) / (loading_scale_input_df["PC2_cos2"].sum(axis=0))
loading_scale_input_df["contrib"] = loading_scale_input_df["PC1_contrib"] + loading_scale_input_df[
"PC2_contrib"]
loading_scale_input_dataf = pd.concat(
[loading_scale_input_df.iloc[:, 0:2], loading_scale_input_df.iloc[:, 6]], axis=1)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_dataf, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["PC1_cos2"] = loading_scale_input_outlier_df["PC1"] ** 2
loading_scale_input_outlier_df["PC2_cos2"] = loading_scale_input_outlier_df["PC2"] ** 2
loading_scale_input_outlier_df["PC1_contrib"] = \
(loading_scale_input_outlier_df["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df["PC2_contrib"] = \
(loading_scale_input_outlier_df["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df["contrib"] = loading_scale_input_outlier_df["PC1_contrib"] + \
loading_scale_input_outlier_df[
"PC2_contrib"]
loading_scale_input_outlier_dataf = pd.concat(
[loading_scale_input_outlier_df.iloc[:, 0:2], loading_scale_input_outlier_df.iloc[:, 6]], axis=1)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat(
[loading_scale_input_outlier_dataf, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_dataf.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='contrib')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["PC1_cos2"] = loading_scale_input_df_covar["PC1"] ** 2
loading_scale_input_df_covar["PC2_cos2"] = loading_scale_input_df_covar["PC2"] ** 2
loading_scale_input_df_covar["PC1_contrib"] = \
(loading_scale_input_df_covar["PC1_cos2"] * 100) / (loading_scale_input_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_df_covar["PC2_contrib"] = \
(loading_scale_input_df_covar["PC2_cos2"] * 100) / (loading_scale_input_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_df_covar["contrib"] = loading_scale_input_df_covar["PC1_contrib"] + \
loading_scale_input_df_covar[
"PC2_contrib"]
loading_scale_input_dataf_covar = pd.concat(
[loading_scale_input_df_covar.iloc[:, 0:2], loading_scale_input_df_covar.iloc[:, 6]], axis=1)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_dataf_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["PC1_cos2"] = loading_scale_input_outlier_df_covar["PC1"] ** 2
loading_scale_input_outlier_df_covar["PC2_cos2"] = loading_scale_input_outlier_df_covar["PC2"] ** 2
loading_scale_input_outlier_df_covar["PC1_contrib"] = \
(loading_scale_input_outlier_df_covar["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["PC2_contrib"] = \
(loading_scale_input_outlier_df_covar["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["contrib"] = loading_scale_input_outlier_df_covar["PC1_contrib"] + \
loading_scale_input_outlier_df_covar[
"PC2_contrib"]
loading_scale_input_outlier_dataf_covar = pd.concat(
[loading_scale_input_outlier_df_covar.iloc[:, 0:2], loading_scale_input_outlier_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat(
[loading_scale_input_outlier_dataf_covar, line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff_covar = pd.concat(
[zero_scale_input_outlier_df_covar, zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='contrib')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_sort_covar
variance = Var_scale_input_outlier_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0)
))
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
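# For reference, the quantities plotted above follow the usual PCA conventions
# (a summary of what this callback computes, not additional behaviour):
#   loading_ij = eigenvector_ij * sqrt(eigenvalue_j)   (components_.T * np.sqrt(explained_variance_))
#   cos2_ij    = loading_ij ** 2
#   contrib_ij = 100 * cos2_ij / sum_i(cos2_ij)        per PC; the PC1 and PC2 contributions are
#                                                      summed to colour the variable arrows.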
@app.callback(Output('download-link', 'download'),
[Input('all-custom-choice', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(all_custom, outlier, matrix_type):
if all_custom == 'All' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_data.csv'
return download
@app.callback(Output('download-link', 'href'),
[Input('all-custom-choice', 'value'),
Input('feature-input', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')])
def update_link(all_custom, input, outlier, matrix_type, data):
if not data:
        return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
# COVARIANCE MATRIX REMOVING OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
csv_string = dat.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return csv_string
@app.callback(Output('download-link-correlation', 'download'),
[Input('eigenA-outlier', 'value'),
])
def update_filename(outlier):
if outlier == 'Yes':
download = 'feature_correlation_removed_outliers_data.csv'
elif outlier == 'No':
download = 'feature_correlation_data.csv'
return download
@app.callback([Output('data-table-correlation', 'data'),
Output('data-table-correlation', 'columns'),
Output('download-link-correlation', 'href')],
[Input("eigenA-outlier", 'value'),
Input('csv-data', 'data')], )
def update_output(outlier, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff_table = correlation_dff * correlation_dff
r2_dff_table.insert(0, 'Features', features)
data_frame = r2_dff_table
if outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier_table = correlation_dff_outlier * correlation_dff_outlier
r2_dff_outlier_table.insert(0, 'Features', features_outlier)
data_frame = r2_dff_outlier_table
data = data_frame.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
csv_string = data_frame.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-eigenA', 'download'),
[Input("matrix-type-data-table", 'value'),
Input('eigenA-outlier', 'value')])
def update_filename(matrix_type, outlier):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_data.csv'
elif outlier == "No" and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-eigenA', 'data'),
Output('data-table-eigenA', 'columns'),
Output('download-link-eigenA', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
        return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
Var_dfff = pd.concat([(Var_cumsum * 100)], axis=1)
Eigen_Analysis = pd.concat([PC_df.T, Eigen_df.T, Var_df.T, Var_dfff.T], axis=0)
Eigen_Analysis = Eigen_Analysis.rename(columns=Eigen_Analysis.iloc[0])
Eigen_Analysis = Eigen_Analysis.drop(Eigen_Analysis.index[0])
Eigen_Analysis.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
Var_dfff_outlier = pd.concat([Var_cumsum_outlier * 100], axis=1)
Eigen_Analysis_Outlier = pd.concat(
[PC_df_outlier.T, Eigen_df_outlier.T, Var_df_outlier.T, Var_dfff_outlier.T],
axis=0)
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.rename(columns=Eigen_Analysis_Outlier.iloc[0])
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.drop(Eigen_Analysis_Outlier.index[0])
Eigen_Analysis_Outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_covar = dff.loc[:, features].values
pca_covar = PCA(n_components=len(features))
principalComponents_covar = pca_covar.fit_transform(x_covar)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
            # combining principal components and target
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
dfff_covar = finalDf_covar
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
Var_dfff_covar = pd.concat([(Var_cumsum_covar * 100)], axis=1)
Eigen_Analysis_covar = pd.concat([PC_df_covar.T, Eigen_df_covar.T, Var_df_covar.T, Var_dfff_covar.T],
axis=0)
Eigen_Analysis_covar = Eigen_Analysis_covar.rename(columns=Eigen_Analysis_covar.iloc[0])
Eigen_Analysis_covar = Eigen_Analysis_covar.drop(Eigen_Analysis_covar.index[0])
Eigen_Analysis_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier_covar = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier_covar = outlier_dff.loc[:, ].values
pca_outlier_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier_covar)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
            # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
dfff_outlier_covar = finalDf_outlier_covar
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier_covar = np.interp(70,
Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier_covar = math.ceil(PC_interp_outlier_covar)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = | pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1) | pandas.concat |
import numpy as np
import matplotlib.pyplot as pls
import pandas as pd
import warnings
from IPython.display import display, HTML
import seaborn as sns
import lightgbm as lgb
from lightgbm import LGBMClassifier,LGBMRegressor
import shap
from .eda_anova import anova,two_way_anova,turkeyHSD
warnings.filterwarnings("ignore")
#=====================#=====================#=====================
# single dataset eda
#=====================#=====================#=====================
#single dataset report
def report(df,target=None,ignore=[],nbrmax=20):
do_eda(df,target,ignore,nbrmax)
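# Illustrative call (hypothetical column names; assumes `df` has a numeric
# target column 'y' and an identifier column 'id' to exclude):
#   report(df, target='y', ignore=['id'], nbrmax=20)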
#=====================#=====================#=====================#
# shap
#=====================#=====================#=====================#
#shap values
def plot_shaps(x, target,ignore=[],nbrmax=None,dependency=True):
features=x.columns.to_list()
features.remove(target)
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
#doesn't work on time columns, remove id columns (all values are different), columns with all nulls
for f in x.columns.to_list():
if (isTime(x[f].dtype) or x[f].isnull().values.all() or (len(x[f].unique())>x.shape[0]/2.0 and str(x[f].dtype) not in numerics)) and f in features:
features.remove(f)
features=list(set(features)-set(ignore))
    [print('Feature name {} contains special JSON characters - Skip'.format(x)) for x in features if ':' in x ]
features=[ x for x in features if not ':' in x ]
#list of categorical features
categorical_features=x[features].select_dtypes(exclude=numerics).columns.to_list()
#change type to categorical for lightgbm
backup={}
for c in categorical_features:
backup[c]=x[c].dtype
x[c] = x[c].astype('category')
target_type,target_cardinality,_=get_feature_info(x,target)
binary_target=(target_type=='Numeric' and target_cardinality==2)
if nbrmax==None:
if len(features)>20:
print('Shap values for 20 most important features will be plotted. If you need more please set nbrmax parameter')
nbrmax=20
if binary_target:
clf = LGBMClassifier(
objective='binary'
,n_estimators=100
, min_data_in_leaf = 10
, min_sum_hessian_in_leaf = 10
, feature_fraction = 0.9
, bagging_fraction = 1
, bagging_freq = 1
, metric='auc'
, learning_rate = 0.03
, num_leaves = 19
, num_threads = 2
, nrounds = 500
)
else:
clf = LGBMRegressor(
n_estimators=100
, min_data_in_leaf = 10
, min_sum_hessian_in_leaf = 10
, feature_fraction = 0.9
, bagging_fraction = 1
, bagging_freq = 1
, learning_rate = 0.03
, num_leaves = 19
, num_threads = 2
, nrounds = 500
)
clf.fit(x[features], x[target])#,categorical_feature=categorical_features)
shap_values = shap.TreeExplainer(clf.booster_).shap_values(x[features])
shap.summary_plot(shap_values, x[features], max_display=nbrmax, auto_size_plot=True)
if binary_target:
vals= np.abs(shap_values).mean(0)
else:
vals= shap_values
feature_importance = pd.DataFrame(list(zip(x[features].columns, sum(vals))), columns=['col_name','feature_importance_vals'])
feature_importance.sort_values(by=['feature_importance_vals'], ascending=False,inplace=True)
sorted_features=feature_importance['col_name'].to_list()
X=x.copy()
if binary_target:
shap.summary_plot(shap_values[1], x[features])
if dependency:
for f in categorical_features:
X[f]= X[f].astype(object)
X[f]=pd.factorize(X[f])[0]
for name in sorted_features[:nbrmax]:
#continue
if name in categorical_features and x[name].astype(str).nunique()>100:
continue
fig, ax = pls.subplots(1,1,figsize=(20,10))
shap.dependence_plot(name, shap_values[1], X[features], display_features=x[features], interaction_index=None,ax=ax)
pls.show()
#restore type
for c in categorical_features:
x[c] = x[c].astype(backup[c])
return sorted_features
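# Sketch of a typical call (hypothetical column names; the target must be numeric,
# and a two-level target triggers the binary-classification branch above):
#   sorted_feats = plot_shaps(df, target='label', ignore=['id'], nbrmax=10)
# The returned list is ordered by aggregate SHAP importance, most important first.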
#=====================#=====================#=====================#=====================
# numerical continues
#=====================#=====================#=====================#=====================
def plot_cuts(df,feature,target,bins=None, figsize=(12,6)):
if bins==None:
bins=np.arange(df[feature].min(),df[feature].max(),(df[feature].max()-df[feature].min())/10.)
fig, (ax1, ax2) = pls.subplots(ncols=2, figsize=figsize)
pls.title('Histogram of {}'.format(feature));
ax1.set_xlabel(feature)
ax1.set_ylabel('count')
ax2.set_xlabel(feature)
ax2.set_ylabel(target)
df.groupby(pd.cut(df[feature], bins=bins))[target].count().plot(kind='bar',ax=ax1,grid=True)
df.groupby(pd.cut(df[feature], bins=bins))[target].mean().plot(kind='bar',ax=ax2,grid=True)
pls.show()
def plot_qcuts(df,feature,target,q=None, figsize=(8,4)):
if q==None:
q = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,0.9, 1]
fig, (ax1, ax2) = pls.subplots(ncols=2, figsize=figsize)
pls.title('Histogram of {}'.format(feature));
ax1.set_xlabel(feature)
ax1.set_ylabel('count')
ax2.set_xlabel(feature)
ax2.set_ylabel(target)
df.groupby(pd.qcut(df[feature], q=q,duplicates='drop'))[target].count().plot(kind='bar',ax=ax1,grid=True)
df.groupby(pd.qcut(df[feature], q=q,duplicates='drop'))[target].mean( ).plot(kind='bar',ax=ax2,grid=True)
pls.show()
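# Example usage (hypothetical DataFrame with a numeric feature 'age' and a
# binary target 'churn'):
#   plot_cuts(df, 'age', 'churn', bins=np.arange(18, 80, 5))   # equal-width bins
#   plot_qcuts(df, 'age', 'churn')                             # quantile (decile) bins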
#=====================#=====================#=====================#=====================
# categorical
#=====================#=====================#=====================#=====================
def plot_stats(df,feature,target,max_nbr=20,sort='Count ',ax1=None,ax2=None):
end=max_nbr
createfig=(ax1==None or ax2==None)
cat_count = df[feature].value_counts().reset_index()
cat_count.columns = [feature,'Count ']
cat_count.sort_values(by=sort, ascending=False, inplace=True)
cat_perc = df[[feature, target]].groupby([feature],as_index=False).mean()
cat_perc=pd.merge(cat_perc,cat_count,on=feature)
cat_perc.sort_values(by=sort, ascending=False, inplace=True)
size=(12,6) if len(cat_count[:max_nbr]) <=40 else (12,14)
if createfig:
fig, (ax1, ax2) = pls.subplots(ncols=2, figsize=size)
sns.set_color_codes("pastel")
s = sns.barplot(ax=ax1, x = feature, y="Count ",order=cat_count[feature][:max_nbr],data=cat_count[:max_nbr])
s.set_xticklabels(s.get_xticklabels(),rotation=90)
s = sns.barplot(ax=ax2, x = feature, y=target, order=cat_perc[feature][:max_nbr], data=cat_perc[:max_nbr])
s.set_xticklabels(s.get_xticklabels(),rotation=90)
pls.ylabel(target, fontsize=10)
pls.tick_params(axis='both', which='major', labelsize=10)
if createfig:
pls.show()
def plot_melt(df,feature,target1,target2,end=20):
cat_count = df[feature].value_counts().reset_index()
cat_count.columns =[feature,'Count ']
cat_count.sort_values(by='Count ', ascending=False, inplace=True)
cat_perc = df[[feature, target1]].groupby([feature],as_index=False).mean()
cat_perc=pd.merge(cat_perc,cat_count,on=feature)
cat_perc2 = df[[feature, target2]].groupby([feature],as_index=False).mean()
cat_perc= | pd.merge(cat_perc,cat_perc2,on=feature) | pandas.merge |
# coding: utf-8
import pandas as pd
import baostock as bs
from datetime import datetime, timedelta
lg = bs.login()
# Print the login response info
print('login response error_code:' + lg.error_code)
print('login response error_msg:' + lg.error_msg)
def get_stock_info(symbol):
rs = bs.query_stock_basic(code=symbol)
data_list = []
while (rs.error_code == '0') & rs.next():
        # Fetch one record at a time and collect them
data_list.append(rs.get_row_data())
result = pd.DataFrame(data_list, columns=rs.fields)
return result
def get_index_stocks(symbol):
"""获取指数成份股
:param symbol: str
如 399300.SZ
:param date: str or datetime
日期,如 2020-08-08
:return: list
examples:
-------
>>> symbols1 = get_index_stocks("000300.XSHG", date="2020-07-08")
>>> symbols2 = get_index_stocks("000300.XSHG", date=datetime.now())
"""
industry_list = []
if symbol == 'sz50':
rs = bs.query_sz50_stocks()
elif symbol == 'hs300':
rs = bs.query_hs300_stocks()
elif symbol == 'zz500':
rs = bs.query_zz500_stocks()
else:
rs = bs.query_stock_industry()
while (rs.error_code == '0') & rs.next():
        # Fetch one record at a time and collect them
industry_list.append(rs.get_row_data())
result = pd.DataFrame(industry_list, columns=rs.fields)
return list(set([x for x in result.code]))
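# Illustrative usage: get_index_stocks('hs300') returns CSI 300 constituent codes
# (baostock-style strings such as 'sh.600000'); 'sz50' and 'zz500' map to the
# SSE 50 and CSI 500 queries, and any other value falls back to the full
# industry listing.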
def _get_start_date(end_date, freq):
if isinstance(end_date, str):
end_date = pd.to_datetime(end_date)
if freq == '1':
start_date = end_date - timedelta(days=30)
elif freq == '5':
start_date = end_date - timedelta(days=70)
elif freq == '15':
start_date = end_date - timedelta(days=200)
elif freq == '30':
start_date = end_date - timedelta(days=300)
elif freq == '60':
start_date = end_date - timedelta(days=500)
elif freq == 'd':
start_date = end_date - timedelta(weeks=500)
elif freq == 'w':
start_date = end_date - timedelta(weeks=1000)
elif freq == 'm':
start_date = end_date - timedelta(weeks=2000)
else:
raise ValueError("'freq' value error, current value is %s, "
"optional valid values are ['1min', '5min', '30min', "
"'D', 'W']" % freq)
return start_date
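# Illustrative behaviour: the lookback grows with the bar size, e.g.
#   _get_start_date('2020-07-19', '5')   # ~70 days before the end date
#   _get_start_date('2020-07-19', 'd')   # ~500 weeks before the end date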
def get_kline(symbol, end_date, freq, start_date=None, count=None):
"""获取K线数据
:param symbol: str
Tushare 标的代码 + Tushare asset 代码,如 000001.SH-I
:param start_date: datetime
截止日期
:param end_date: datetime
截止日期
:param freq: str
K线级别,可选值 ['1min', '5min', '30min', '60min', 'D', 'W', "M"]
:param count: int
K线数量,最大值为 5000
:return: pd.DataFrame
>>> start_date = datetime.strptime("20200701", "%Y%m%d")
>>> end_date = datetime.strptime("20200719", "%Y%m%d")
>>> df1 = get_kline(symbol="000001.SH-I", start_date=start_date, end_date=end_date, freq="1min")
>>> df2 = get_kline(symbol="000001.SH-I", end_date=end_date, freq="1min", count=1000)
"""
if count:
start_date = _get_start_date(end_date, freq)
start_date = start_date.date().__str__()
if isinstance(end_date, str):
end_date = pd.to_datetime(end_date)
end_date = end_date + timedelta(days=1)
end_date = end_date.date().__str__()
if isinstance(end_date, datetime):
end_date = end_date.date().__str__()
if isinstance(start_date, datetime):
start_date = start_date.date().__str__()
rs = None
if freq in ('d', 'w', 'm'):
rs = bs.query_history_k_data_plus(symbol,
"date,code,open,high,low,close,volume,amount",
start_date=start_date, end_date=end_date,
frequency=freq, adjustflag="2")
else:
rs = bs.query_history_k_data_plus(symbol,
"time,code,open,high,low,close,volume,amount",
start_date=start_date, end_date=end_date,
frequency=freq, adjustflag="2")
data_list = []
while (rs.error_code == '0') & rs.next():
        # Fetch one record at a time and collect them
data_list.append(rs.get_row_data())
df = pd.DataFrame(data_list, columns=rs.fields)
    # Normalize the K-line data to the columns ["symbol", "dt", "open", "close", "high", "low", "vol"]
if freq in ('d', 'w', 'm'):
df.rename(columns={'code': "symbol", "date": "dt", "volume": "vol"}, inplace=True)
else:
df.rename(columns={'code': "symbol", "time": "dt", "volume": "vol"}, inplace=True)
df = df.dropna()
df.drop_duplicates(subset='dt', keep='first', inplace=True)
df.sort_values('dt', inplace=True)
df['dt'] = df.dt.apply(str)
df['open'] = df.open.apply(float)
df['close'] = df.close.apply(float)
df['high'] = df.high.apply(float)
df['low'] = df.low.apply(float)
df['vol'] = df.vol.apply(float)
if freq in ('1','5','15','30','60'):
        # Drop the empty 09:30 bar
df['not_start'] = df.dt.apply(lambda x: not x.endswith("09:30:00"))
df = df[df['not_start']]
if count:
df = df.tail(count)
df.reset_index(drop=True, inplace=True)
# df.loc[:, "dt"] = pd.to_datetime(df['dt'])
df["dt"] = df["dt"].apply(pd.to_datetime)
k = df[['symbol', 'dt', 'open', 'close', 'high', 'low', 'vol']]
for col in ['open', 'close', 'high', 'low']:
k.loc[:, col] = k[col].apply(lambda x: round(x, 2))
return k
def download_kline(symbol, freq, start_date, end_date, delta, save=True):
"""下载K线数据
:param save:
:param symbol:
:param end_date:
:param freq:
:param start_date:
:param delta:
:return:
>>> start_date = datetime.strptime("20200101", "%Y%m%d")
>>> end_date = datetime.strptime("20200719", "%Y%m%d")
    >>> df = download_kline("000001.SH-I", "5", start_date, end_date, delta=timedelta(days=10), save=False)
"""
    data = []
    end_dt = start_date + delta
    print("Start downloading data: {} - {} - {}".format(symbol, start_date, end_date))
    df_ = get_kline(symbol, start_date=start_date, end_date=end_dt, freq=freq)
    if not df_.empty:
        data.append(df_)
    while end_dt < end_date:
        # advance the window first so the initial chunk is not fetched twice
        start_date = end_dt
        end_dt += delta
        df_ = get_kline(symbol, start_date=start_date, end_date=end_dt, freq=freq)
        if not df_.empty:
            data.append(df_)
        print("Download progress: {} - {} - {}".format(symbol, start_date, end_dt))
df = | pd.concat(data, ignore_index=True) | pandas.concat |
"""
Author: <NAME>
Modified: <NAME>
"""
import os
import warnings
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from statsmodels.tools.sm_exceptions import EstimationWarning
from statsmodels.tsa.holtwinters import (ExponentialSmoothing,
SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS)
base, _ = os.path.split(os.path.abspath(__file__))
housing_data = pd.read_csv(os.path.join(base, 'results', 'housing-data.csv'))
housing_data = housing_data.set_index('DATE')
housing_data = housing_data.asfreq('MS')
SEASONALS = ('add', 'mul', None)
TRENDS = ('add', 'mul', None)
def _simple_dbl_exp_smoother(x, alpha, beta, l0, b0, nforecast=0):
"""
Simple, slow, direct implementation of double exp smoothing for testing
"""
n = x.shape[0]
l = np.zeros(n)
b = np.zeros(n)
xhat = np.zeros(n)
f = np.zeros(nforecast)
l[0] = l0
b[0] = b0
# Special case the 0 observations since index -1 is not available
xhat[0] = l0 + b0
l[0] = alpha * x[0] + (1 - alpha) * (l0 + b0)
b[0] = beta * (l[0] - l0) + (1 - beta) * b0
for t in range(1, n):
# Obs in index t is the time t forecast for t + 1
l[t] = alpha * x[t] + (1 - alpha) * (l[t - 1] + b[t - 1])
b[t] = beta * (l[t] - l[t - 1]) + (1 - beta) * b[t - 1]
xhat[1:] = l[0:-1] + b[0:-1]
f[:] = l[-1] + np.arange(1, nforecast + 1) * b[-1]
err = x - xhat
return l, b, f, err, xhat
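# Reference recurrences implemented above (Holt's linear / double exponential
# smoothing with additive trend):
#   l_t = alpha * x_t + (1 - alpha) * (l_{t-1} + b_{t-1})
#   b_t = beta * (l_t - l_{t-1}) + (1 - beta) * b_{t-1}
#   h-step-ahead forecast: l_T + h * b_T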
class TestHoltWinters(object):
@classmethod
def setup_class(cls):
# Changed for backwards compatibility with pandas
# oildata_oil_json = '{"851990400000":446.6565229,"883526400000":454.4733065,"915062400000":455.662974,"946598400000":423.6322388,"978220800000":456.2713279,"1009756800000":440.5880501,"1041292800000":425.3325201,"1072828800000":485.1494479,"1104451200000":506.0481621,"1135987200000":526.7919833,"1167523200000":514.268889,"1199059200000":494.2110193}'
# oildata_oil = pd.read_json(oildata_oil_json, typ='Series').sort_index()
data = [446.65652290000003, 454.47330649999998, 455.66297400000002,
423.63223879999998, 456.27132790000002, 440.58805009999998,
425.33252010000001, 485.14944789999998, 506.04816210000001,
526.79198329999997, 514.26888899999994, 494.21101929999998]
index = ['1996-12-31 00:00:00', '1997-12-31 00:00:00', '1998-12-31 00:00:00',
'1999-12-31 00:00:00', '2000-12-31 00:00:00', '2001-12-31 00:00:00',
'2002-12-31 00:00:00', '2003-12-31 00:00:00', '2004-12-31 00:00:00',
'2005-12-31 00:00:00', '2006-12-31 00:00:00', '2007-12-31 00:00:00']
oildata_oil = pd.Series(data, index)
oildata_oil.index = pd.DatetimeIndex(oildata_oil.index,
freq=pd.infer_freq(oildata_oil.index))
cls.oildata_oil = oildata_oil
# air_ausair_json = '{"662601600000":17.5534,"694137600000":21.8601,"725760000000":23.8866,"757296000000":26.9293,"788832000000":26.8885,"820368000000":28.8314,"851990400000":30.0751,"883526400000":30.9535,"915062400000":30.1857,"946598400000":31.5797,"978220800000":32.577569,"1009756800000":33.477398,"1041292800000":39.021581,"1072828800000":41.386432,"1104451200000":41.596552}'
# air_ausair = pd.read_json(air_ausair_json, typ='Series').sort_index()
data = [17.5534, 21.860099999999999, 23.886600000000001,
26.929300000000001, 26.888500000000001, 28.831399999999999,
30.075099999999999, 30.953499999999998, 30.185700000000001,
31.579699999999999, 32.577568999999997, 33.477398000000001,
39.021580999999998, 41.386431999999999, 41.596552000000003]
index = ['1990-12-31 00:00:00', '1991-12-31 00:00:00', '1992-12-31 00:00:00',
'1993-12-31 00:00:00', '1994-12-31 00:00:00', '1995-12-31 00:00:00',
'1996-12-31 00:00:00', '1997-12-31 00:00:00', '1998-12-31 00:00:00',
'1999-12-31 00:00:00', '2000-12-31 00:00:00', '2001-12-31 00:00:00',
'2002-12-31 00:00:00', '2003-12-31 00:00:00', '2004-12-31 00:00:00']
air_ausair = | pd.Series(data, index) | pandas.Series |
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
# the holiday moves the roll schedule up one day, since Friday is
# excluded as a day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_static_bad_transitions(dates):
contract_dates = dates.iloc[[0]]
ts = dates.iloc[0] + BDay(-8)
# transition does not contain 'front' column
cols = pd.MultiIndex.from_product([[0], ['not_front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition does not sum to one across rows
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition is not monotonic increasing in back
transition = pd.DataFrame([[0.7, 0.3], [0.8, 0.2], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
def test_no_roll_date_two_generics_static_transition(dates):
dt = dates.iloc[0]
contract_dates = dates
ts = dt + BDay(-8)
cols = | pd.MultiIndex.from_product([[0, 1], ['front', 'back']]) | pandas.MultiIndex.from_product |
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only input all files (.bam and .fa) output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import os
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message),
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
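# Concrete thresholds implied above for a window of w=4 CpGs: heterogeneity
# estimation ("complete") needs at least 2**4 = 16 reads covering every CpG,
# while imputation needs 2**(4-2) = 4 fully observed reads plus at least one
# read missing exactly one CpG.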
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
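# Small worked example (patterns as 0/1 arrays of length w=4):
#   p1 = np.array([1, 0, 1, 1]); p2 = np.array([1, 1, 1, 0])
#   Ham_d(p1, p2) -> 2                      # two positions differ
# WDK_d (weighted degree kernel distance) also penalizes every mismatching
# sub-pattern, so it is 0 for identical patterns and largest when no
# sub-pattern is shared.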
# input a window of w CGs and output a list of proportions with starting genomic location and genomic distance across
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
            phylotree=np.concatenate(([0],np.repeat(3,16),np.repeat(1.5,6),[3.2,0.8],np.repeat(2,3),[1.5,1.5]))
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
            phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),[1.5,0.75])
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
        Q=sum(phylotree*countn)
        score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out, opt
else:
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out
def impute(window,w):
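    """Impute reads that are missing exactly one of the w methylation calls.

    If every observed call at the missing position agrees, that value is used;
    otherwise the call is copied from a randomly chosen fully observed read that
    matches at the other w-1 positions, falling back to sampling from the
    observed calls at that position.
    """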
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
return window
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
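    """Genome-wide MeH screening of CpG (CG) sites for one BAM file.

    Walks the pileup, collects per-read methylation calls in windows of w CpGs
    on each strand, optionally imputes partially observed reads, and writes
    per-window MeH scores (plus optional pattern compositions and per-site
    methylation levels) to CSV files under MeHdata/.
    """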
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['G'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['C'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
#for i in range(0,meth.shape[1]-w+1,1):
#if i>w-2 and i<2*w:
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
    print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CG'
#samfile.close()
def CHHgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
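    """Genome-wide MeH screening of CHH sites for one BAM file.

    Same workflow as CGgenome_scr, but windows are built from cytosines in CHH
    context (checked against the reference genome on both strands).
    """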
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
start=datetime.datetime.now()
MU=np.zeros((2,w))
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
#if enough_reads(window,w,complete=True):
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
    print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
    return sample, coverage, cov_context, 'CHH'
def CHGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
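    """Genome-wide MeH screening of CHG sites for one BAM file.

    Same workflow as CGgenome_scr, but windows are built from cytosines in CHG
    context (checked against the reference genome on both strands).
    """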
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
coverage = cov_context = 0
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
        if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
MU=np.zeros((2,w))
start=datetime.datetime.now()
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)=='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)=='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # G
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2r = pd.DataFrame(data=dr)
#df2.head()
tempr=tempr.append(df2r, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
#temp.head()
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','G','N'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','C','T'],np.nan)
methbin = aggreR # backup
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#total += w
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','A','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
    print("Done CHG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
    return sample, coverage, cov_context, 'CHG'
def split_bam(samplenames,Folder):
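    """Split one BAM file into roughly 340 MB chunks named <name>_<x>.bam under
    Folder and index each chunk, so the chunks can be screened in parallel.
    """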
# get bam size
spbam_list = []
bamfile = samplenames + '.bam'
statinfo_out = os.stat(Folder+bamfile)
bamsize = statinfo_out.st_size
samfile = pysam.Samfile(Folder+bamfile, "rb")
fileout_base = os.path.splitext(bamfile)[0] # filename
ext = '.bam'
x = 0
fileout = Folder+fileout_base+"_" + str(x)+ext # filename_x.bam
print("fileout",fileout)
header = samfile.header
outfile = pysam.Samfile(fileout, "wb", header = header)
sum_Outfile_Size=0
for reads in samfile.fetch():
outfile.write(reads)
statinfo_out = os.stat(fileout)
outfile_Size = statinfo_out.st_size
if(outfile_Size >=337374182 and sum_Outfile_Size <= bamsize):
sum_Outfile_Size = sum_Outfile_Size + outfile_Size
x = x + 1
spbam_list.append(fileout_base + "_" + str(x)+ext)
outfile.close()
pysam.index(fileout)
fileout = Folder+fileout_base + "_" + str(x)+ext
print("fileout",fileout)
outfile = pysam.Samfile(fileout, "wb",header = header)
outfile.close()
pysam.index(fileout)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--windowsize",type=int, default=4 ,help='number of CGs')
parser.add_argument("-c", "--cores",type=int, default=4, help='number of cores')
parser.add_argument("-m", "--MeH",type=int, default=2, help='Methylation heterogeneity score 1:Abundance 2:PW 3:Phylogeny')
parser.add_argument("-d", "--dist",type=int, default=1, help='Distance between methylation patterns 1:Hamming 2:WDK')
parser.add_argument("--CG", default=False, action='store_true', help='Include genomic context CG')
parser.add_argument("--CHG", default=False, action='store_true', help='Include genomic context CHG')
parser.add_argument("--CHH", default=False, action='store_true', help='Include genomic context CHH')
parser.add_argument("--opt", default=False, action='store_true', help='Outputs compositions of methylation patterns')
parser.add_argument('--mlv', default=False, action='store_true', help='Outputs methylation levels')
parser.add_argument('--imp', default=True, action='store_false', help='Turn off BSImp imputation of partially observed patterns (imputation is on by default)')
args = parser.parse_args()
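# Example invocation (the script name below is an assumption, not fixed by this file):
#   python MeHscr.py -w 4 -c 8 --CG --mlv
# screens every .bam under MeHdata/ for windows of 4 CpGs on 8 cores and also
# reports per-bin methylation levels.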
import sys
import time
import os
import pandas as pd
import multiprocessing
from joblib import Parallel, delayed
#num_cores = multiprocessing.cpu_count()
if __name__ == "__main__":
open_log('MeHscreening.log')
logm("Call genome screening.")
#start = time.time()
Folder = 'MeHdata/'
files = os.listdir(Folder)
bam_list = []
# all samples' bam files
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension == '.fa':
fa = filename
if file_extension == '.bam':
bam_list.append(filename)
#if 'cores' in args:
# num_cores = args.cores
#else:
# num_cores = 4
Parallel(n_jobs=args.cores)(delayed(split_bam)(bamfile,Folder=Folder) for bamfile in bam_list)
spbam_list = []
tempfiles = os.listdir(Folder)
for file in tempfiles:
filename, file_extension = os.path.splitext(file)
if file_extension=='.bam' and filename not in bam_list:
spbam_list.append(filename)
#print(spbam_list)
topp = pd.DataFrame(columns=['sample','coverage','context_coverage','context'])
#CG = []
#start=t.time()
if args.CG:
con='CG'
CG=Parallel(n_jobs=args.cores)(delayed(CGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CG.")
# merge MeH within sample
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False, header = True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
#os.chdir('../')
#os.chdir(outputFolder)
logm("Merging ML within samples for CG.")
# append ML within samples
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
logm("Merging ML between samples for CG.")
# merge ML between samples
if args.mlv:
for sample in bam_list:
tomerge_dir = Folder + con + '_ML_' + str(sample) + '.csv'
res_dir = Folder + con + '_ML_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'ML': sample})
Result=Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result = Result.rename(columns={'ML': sample})
#Result = Result.drop(columns=['counts','pos','depth','dis'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
logm("Merging MeH between samples for CG.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result.head()
Result.dropna(axis = 0, thresh=4, inplace = True)
Result = Result.rename(columns={'MeH': sample})
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
Result.to_csv(Folder + con + '_' +'Results.csv' ,index = False,header=True)
print("All done.",len(bam_list),"bam files processed and merged for CG.")
logm("All done. "+str(len(bam_list))+" bam files processed and merged for CG.")
for i in CG:
toout=pd.DataFrame({'sample':i[0],'coverage':i[1],'context_coverage':i[2],'context':i[3]},index=[0])
topp=topp.append(toout)
if args.CHG:
con='CHG'
CG=Parallel(n_jobs=args.cores)(delayed(CHGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CHG.")
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
logm("Merging ML within samples for CHG.")
# append ML within samples
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
#Count=Count.drop_duplicates()
#print(Count)
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
os.remove(toapp_dir)
logm("Merging MeH between samples for CHG.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
#Tomerge = Tomerge.drop(columns=['dis','ML','depth'])
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result.head()
Result = Result.rename(columns={'MeH': sample})
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
logm("Merging ML between samples for CHG.")
# merge ML between samples
if args.mlv:
for sample in bam_list:
tomerge_dir = Folder + con + '_ML_' + str(sample) + '.csv'
res_dir = Folder + con + '_ML_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'ML': sample})
Result=Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result = Result.rename(columns={'ML': sample})
#Result = Result.drop(columns=['counts','pos','depth','dis'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
logm("All done. "+str(len(bam_list))+" bam files processed and merged for CHG.")
for i in CG:
toout=pd.DataFrame({'sample':i[0],'coverage':i[1],'context_coverage':i[2],'context':i[3]},index=[0])
topp=topp.append(toout)
if args.CHH:
con='CHH'
CG=Parallel(n_jobs=args.cores)(delayed(CHHgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CHH.")
# merge MeH within sample
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
                    Tomod = pd.read_csv(res_dir)
from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType
import pandas as pd
from scipy.signal import butter
from scipy.signal import lfilter
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
@register(inputs={"data": DataType.Series, "frequency": DataType.Float},outputs={"filtered": DataType.Series})
def main(*, data, frequency):
"""entrypoint function for this component"""
# ***** DO NOT EDIT LINES ABOVE *****
# write your code here.
nyq = 0.5 * data.size / ((data.index[-1] - data.index[0]).total_seconds())
normal_frequency = frequency / nyq
b, a = butter(1, normal_frequency, btype="high", analog=False)
filtered = lfilter(b, a, data)
    return {"filtered": pd.Series(filtered, index=data.index)}
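# Minimal usage sketch (illustration only; the series and cut-off frequency below
# are made-up values, not part of the component contract):
#
#   import numpy as np
#   import pandas as pd
#
#   idx = pd.date_range("2020-01-01", periods=100, freq="S")
#   series = pd.Series(np.random.randn(100).cumsum(), index=idx)
#   filtered = main(data=series, frequency=0.1)["filtered"]  # high-pass at 0.1 Hz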
import numpy as np
import pandas as pd
import xarray as xr
from raven import models
from .common import TESTDATA
import pytest
@pytest.mark.skip
class TestGR4JCemaneige():
def test_simple(self):
        time = pd.date_range('2000-01-01', freq='D', periods=365 * 3)
# Implementation of support points generator
#
# Reference:
# <NAME>, "Support Points" (2018) *The Annals of Statistics*
__all__ = [
"tran_sp",
"tf_sp",
]
from grama import add_pipe
from numpy import diag, eye, ma, newaxis, number, zeros
from numpy.linalg import norm
from numpy.random import choice, multivariate_normal
from numpy.random import seed as setseed
from pandas import DataFrame
from toolz import curry
from warnings import warn
## Helper functions
##################################################
def _iterate_x(X, Y, ind):
r"""Iterate a single candidate point
Implementation of Equation (22) from Mak and Joseph (2018)
Arguments:
X (np.array): candidate points, X.shape == (n, p)
Y (np.array): target points, Y.shape == (N, p)
ind (int): candidate to iterate, 0 <= ind <= n - 1
Returns:
np.array: updated candidate point
"""
## Setup
n = X.shape[0]
N = Y.shape[0]
## Compute iteration
# First term
diffx = ma.array(X[ind] - X, mask=False)
diffx[ind].mask = True
diffx_norm = ma.array(norm(diffx, axis=1), mask=False)
diffx_norm.mask[ind] = True
t1 = (N / n) * (diffx / diffx_norm[:, newaxis]).sum(axis=0)
# Second term
diffy_norm = norm(X[ind] - Y, axis=1)
q = (1 / diffy_norm).sum()
t2 = (Y / diffy_norm[:, newaxis]).sum(axis=0)
return (1 / q) * (t1 + t2)
def _sp_cpp(X0, Y, delta=1e-6, iter_max=500):
r"""Implementation of sp.cpp algorithm
    Implementation of sp.cpp algorithm from Mak and Joseph (2018). Note that
    this implementation takes the initial candidate points X0 as an argument
    rather than generating them internally.
Signature:
X, d, iter_c = _sp_cpp(X0, Y)
Arguments:
X0 (np.array): initial candidate points, X0.shape == (n, p)
Y (np.array): target points, Y.shape == (N, p)
delta (float): convergence criterion, as average pairwise-distance
between iterations
iter_max (int): maximum iteration count
Returns:
X (np.array): optimized support points
d (float): average pairwise-distance at termination
iter_c (int): iteration count at termination
"""
## Setup
N, p = Y.shape
n = X0.shape[0]
Xn = X0.copy()
## Primary loop
d = delta * 2
iter_c = 0
# Check convergence criterion
while (d >= delta) and (iter_c < iter_max):
# Update the candidate points
for i in range(n):
Xn[i] = _iterate_x(X0, Y, i)
# Update loop variables
d = norm(X0 - Xn, axis=1).mean()
iter_c = iter_c + 1
# Overwrite X0
X0 = Xn.copy()
## DEBUG
return Xn, d, iter_c
def _perturbed_choice(Y, n):
r"""Choose a set of perturbed points
Arguments:
Y (np.array): target points, Y.shape == (N, p)
Returns:
np.array: perturbed points, shape == (n, p)
"""
i0 = choice(Y.shape[0], size=n)
# Add noise to initial proposal to avoid X-Y overlap;
# random directions with fixed distance
V_rand = multivariate_normal(zeros(Y.shape[1]), eye(Y.shape[1]), size=n)
V_rand = V_rand / norm(V_rand, axis=1)[:, newaxis]
X0 = Y[i0] + V_rand * Y.std(axis=0)
return X0
## Public interfaces
##################################################
@curry
def tran_sp(
df,
n=None,
var=None,
n_maxiter=500,
tol=1e-3,
seed=None,
verbose=True,
standardize=True,
):
r"""Compact a dataset with support points
Arguments:
df (DataFrame): dataset to compact
n (int): number of samples for compacted dataset
var (list of str): list of variables to compact, must all be numeric
n_maxiter (int): maximum number of iterations for support point algorithm
tol (float): convergence tolerance
verbose (bool): print messages to the console?
standardize (bool): standardize columns before running sp? (Restores after sp)
Returns:
DataFrame: dataset compacted with support points
Examples:
>>> import grama as gr
>>> from grama.data import df_diamonds
>>> df_sp = gr.tran_sp(df_diamonds, n=50, var=["price", "carat"])
"""
## Setup
setseed(seed)
# Handle input variables
if var is None:
# Select numeric columns only
var = list(df.select_dtypes(include=[number]).columns)
if verbose:
print("tran_sp has selected var = {}".format(var))
# Extract values
Y = df[var].values
if standardize:
Y_mean = Y.mean(axis=0)
Y_sd = Y.std(axis=0)
Y = (Y - Y_mean) / Y_sd
# Generate initial proposal points
X0 = _perturbed_choice(Y, n)
## Run sp.ccp algorithm
X, d, iter_c = _sp_cpp(X0, Y, delta=tol, iter_max=n_maxiter)
if verbose:
print(
"tran_sp finished in {0:} iterations with distance criterion {1:4.3e}".format(
iter_c, d
)
)
if d > tol:
warn(
"Convergence tolerance not met; d = {0:4.3e} > tol = {1:4.3e}".format(
d, tol
),
RuntimeWarning,
)
if standardize:
X = X * Y_sd + Y_mean
## Package results
return | DataFrame(data=X, columns=var) | pandas.DataFrame |
import pandas as pd
import numpy as np
import logging
import os
import math
import sys
import random
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from global_variables import config as g
# ROOT_DIR = os.path.dirname(os.path.abspath("top_level_file.txt"))
ROOT_DIR = g.ROOT_DIR
raw_data_dir = g.raw_data_dir
processed_data_dir = g.processed_data_dir
FILL_NAN_WITH_ZERO = False
FILL_NAN_WITH_MEDIAN = False
FILL_NAN_WITH_MEAN = False
INTERPOLATE_NAN_VALUES = False
DELETE_NAN_ROWS = False
REPLACE_WITH_PREVIOUS_DAY = True
def import_and_merge_data(input_filepath:str=raw_data_dir, output_filepath:str=processed_data_dir):
logger = logging.getLogger("def import_and_merge_data")
da_prices = pd.read_csv(input_filepath + "da_prices.csv")
loadcons = | pd.read_csv(input_filepath + "loadcons.csv") | pandas.read_csv |
import pytest
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import pandas as pd
from generators import (
generate_votes,
row_maker,
)
def test_row_keys():
voting_machine = row_maker()
row = voting_machine()
assert list(row.keys()) == ["timestamp", "id", "region", "vote"]
@settings(deadline=None)
@given(st.integers(min_value=0, max_value=200))
def test_generated_as_many_votes_as_requested(length):
votes = generate_votes(length)
assert (length == 0 and votes.empty) or generate_votes(length).shape == (length, 4)
@settings(deadline=None)
@given(st.integers(min_value=0, max_value=200))
def test_votes_columns(length):
data = generate_votes(length)
assert (length == 0 and data.empty) or list(data.columns) == [
"timestamp",
"id",
"region",
"vote",
]
@settings(deadline=None)
@given(st.integers(min_value=1, max_value=200))
def test_id_lengths(length):
string_lengths = generate_votes(length)["id"].apply(lambda x: len(x))
assert all(uid_len == 36 for uid_len in string_lengths)
@settings(deadline=None)
@given(st.integers(min_value=1, max_value=200))
def test_ids_have_no_repetitions(length):
assert generate_votes(length)["id"].drop_duplicates().shape[0] == length
@settings(deadline=None)
@given(st.integers(min_value=1, max_value=200))
def test_timestamps_have_constant_date(length):
dates = list(generate_votes(length)["timestamp"].dt.date.unique())
assert (length == 0 and not dates) or dates == [pd.Timestamp("2020-12-10")]
@settings(deadline=None)
@given(st.integers(min_value=1, max_value=200))
def test_timestamps_have_hours_within_range(length):
hours = generate_votes(length)["timestamp"].dt.hour.unique()
assert all(hour in range(8, 21) for hour in hours)
@settings(deadline=None)
@given(st.integers(min_value=1000, max_value=1400))
def test_all_regions_appear(length):
expected_regions = set(pd.read_csv("data/region_data.csv").region)
actual_regions = set(generate_votes(length)["region"].unique())
assert expected_regions == actual_regions
@settings(deadline=None)
@given(st.integers(min_value=1000, max_value=1800))
def test_regions_distribution(length):
expected = pd.read_csv("data/region_data.csv", usecols=["region", "percent"])
regions = pd.DataFrame(generate_votes(length)["region"])
regions["cnt"] = 1
actual = (regions.groupby("region").agg("count") / length).reset_index()
joined = pd.merge(expected, actual, on="region")
assert joined.shape == (51, 3)
joined["diff"] = np.abs(joined["percent"] - joined["cnt"])
assert joined["diff"].max() < 0.05
@settings(deadline=None)
@given(st.integers(min_value=1000, max_value=1500))
def test_votes_have_three_colours(length):
expected = {"yellow", "blue", "red"}
actual = set(generate_votes(length)["vote"].unique())
assert expected == actual
@settings(deadline=None)
@given(st.integers(min_value=1000, max_value=1500))
def test_timestamp_distribution_blue(length):
colors = (
| pd.read_csv("data/region_data.csv", usecols=["region", "color"]) | pandas.read_csv |
import sys
import time
import pandas as pd
import numpy as np
import copyreg, types
from tqdm import tqdm
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-talk')
plt.style.use('bmh')
#plt.rcParams['font.family'] = 'DejaVu Sans Mono'
plt.rcParams['font.size'] = 9.5
plt.rcParams['font.weight'] = 'medium'
# =======================================================
# Symmetric CUSUM Filter [2.5.2.1]
def getTEvents(gRaw, h):
"""cusum filter
args
----
gRaw: array-like
h: int() or float()
returns
-------
pd.DatetimeIndex()
"""
tEvents, sPos, sNeg = [], 0, 0
diff = np.log(gRaw).diff().dropna().abs()
for i in tqdm(diff.index[1:]):
try:
pos, neg = float(sPos+diff.loc[i]), float(sNeg+diff.loc[i])
except Exception as e:
print(e)
print(sPos+diff.loc[i], type(sPos+diff.loc[i]))
print(sNeg+diff.loc[i], type(sNeg+diff.loc[i]))
break
sPos, sNeg=max(0., pos), min(0., neg)
if sNeg<-h:
sNeg=0;tEvents.append(i)
elif sPos>h:
sPos=0;tEvents.append(i)
return pd.DatetimeIndex(tEvents)
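# --- Illustrative usage sketch (not from the original source) -----------------
# Minimal example of applying the CUSUM filter to a synthetic price series;
# the series construction and the threshold h=0.01 are assumptions.
def _example_getTEvents():
    idx = pd.date_range('2020-01-01', periods=500, freq='T')
    prices = pd.Series(100*np.exp(np.random.randn(500).cumsum()*0.001), index=idx)
    return getTEvents(prices, h=0.01)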
# =======================================================
# Daily Volatility Estimator [3.1]
## for wtvr reason dates are not aligned for return calculation
## must account for it for computation
def getDailyVol(close,span0=100):
# daily vol reindexed to close
df0=close.index.searchsorted(close.index-pd.Timedelta(days=1))
#bp()
df0=df0[df0>0]
#bp()
df0=(pd.Series(close.index[df0-1],
index=close.index[close.shape[0]-df0.shape[0]:]))
#bp()
try:
df0=close.loc[df0.index]/close.loc[df0.values].values-1 # daily rets
except Exception as e:
print(e)
print('adjusting shape of close.loc[df0.index]')
cut = close.loc[df0.index].shape[0] - close.loc[df0.values].shape[0]
df0=close.loc[df0.index].iloc[:-cut]/close.loc[df0.values].values-1
df0=df0.ewm(span=span0).std().rename('dailyVol')
return df0
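# Hedged usage sketch: the daily-volatility estimate above is typically reused
# as the dynamic profit-taking/stop-loss target when forming events; `close`
# is assumed to be a pd.Series of prices with a DatetimeIndex.
def _example_getDailyVol(close):
    vol = getDailyVol(close, span0=100)
    return vol.dropna()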
# =======================================================
# Triple-Barrier Labeling Method [3.2]
def applyPtSlOnT1(close,events,ptSl,molecule):
# apply stop loss/profit taking, if it takes place before t1 (end of event)
events_=events.loc[molecule]
out=events_[['t1']].copy(deep=True)
if ptSl[0]>0: pt=ptSl[0]*events_['trgt']
else: pt=pd.Series(index=events.index) # NaNs
if ptSl[1]>0: sl=-ptSl[1]*events_['trgt']
else: sl=pd.Series(index=events.index) # NaNs
for loc,t1 in events_['t1'].fillna(close.index[-1]).iteritems():
df0=close[loc:t1] # path prices
df0=(df0/close[loc]-1)*events_.at[loc,'side'] # path returns
out.loc[loc,'sl']=df0[df0<sl[loc]].index.min() # earliest stop loss
out.loc[loc,'pt']=df0[df0>pt[loc]].index.min() # earliest profit taking
return out
# =======================================================
# Getting Time of First Touch (getEvents) [3.3]
def getEvents(close, tEvents, ptSl, trgt, minRet, numThreads,t1=False, side=None):
#1) get target
trgt=trgt.loc[tEvents]
trgt=trgt[trgt>minRet] # minRet
#2) get t1 (max holding period)
if t1 is False:t1=pd.Series(pd.NaT, index=tEvents)
#3) form events object, apply stop loss on t1
if side is None:side_,ptSl_=pd.Series(1.,index=trgt.index), [ptSl[0],ptSl[0]]
else: side_,ptSl_=side.loc[trgt.index],ptSl[:2]
events=(pd.concat({'t1':t1,'trgt':trgt,'side':side_}, axis=1)
.dropna(subset=['trgt']))
df0=mpPandasObj(func=applyPtSlOnT1,pdObj=('molecule',events.index),
numThreads=numThreads,close=close,events=events,
ptSl=ptSl_)
events['t1']=df0.dropna(how='all').min(axis=1) #pd.min ignores nan
if side is None:events=events.drop('side',axis=1)
return events
# =======================================================
# Adding Vertical Barrier [3.4]
def addVerticalBarrier(tEvents, close, numDays=1):
t1=close.index.searchsorted(tEvents+pd.Timedelta(days=numDays))
t1=t1[t1<close.shape[0]]
t1=(pd.Series(close.index[t1],index=tEvents[:t1.shape[0]]))
return t1
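# --- Illustrative end-to-end sketch of the triple-barrier workflow ------------
# Chains the helpers defined above in the order described by the source text;
# ptSl, minRet and numDays are demonstration values, not recommendations.
def _example_triple_barrier(close):
    tEvents = getTEvents(close, h=0.01)              # CUSUM event times
    trgt = getDailyVol(close)                        # dynamic barrier width
    t1 = addVerticalBarrier(tEvents, close, numDays=1)
    events = getEvents(close, tEvents, ptSl=[1, 1], trgt=trgt,
                       minRet=0.001, numThreads=1, t1=t1)
    bins = getBins(events, close, t1=t1)             # labels in {-1, 0, 1}
    return events, bins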
# =======================================================
# Labeling for side and size [3.5, 3.8]
def getBins(events, close, t1=None):
'''
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
-t1 is original vertical barrier series
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
'''
# 1) prices aligned with events
events_ = events.dropna(subset=['t1'])
px = events_.index.union(events_['t1'].values).drop_duplicates()
px = close.reindex(px, method='bfill')
# 2) create out object
out = pd.DataFrame(index=events_.index)
out['ret'] = px.loc[events_['t1'].values].values / px.loc[
events_.index] - 1
if 'side' in events_: out['ret'] *= events_['side'] # meta-labeling
out['bin'] = np.sign(out['ret'])
if 'side' not in events_:
# only applies when not meta-labeling.
# to update bin to 0 when vertical barrier is touched, we need the
# original vertical barrier series since the events['t1'] is the time
# of first touch of any barrier and not the vertical barrier
# specifically. The index of the intersection of the vertical barrier
# values and the events['t1'] values indicate which bin labels needs
# to be turned to 0.
vtouch_first_idx = events[events['t1'].isin(t1.values)].index
out.loc[vtouch_first_idx, 'bin'] = 0.
if 'side' in events_: out.loc[out['ret'] <= 0, 'bin'] = 0 # meta-labeling
return out
# =======================================================
# Expanding getBins to Incorporate Meta-Labeling [3.7]
def getBinsOld(events, close):
'''
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
'''
#1) prices aligned with events
events_=events.dropna(subset=['t1'])
px=events_.index.union(events_['t1'].values).drop_duplicates()
px=close.reindex(px,method='bfill')
#2) create out object
out=pd.DataFrame(index=events_.index)
out['ret']=px.loc[events_['t1'].values].values/px.loc[events_.index]-1
if 'side' in events_:out['ret']*=events_['side'] # meta-labeling
out['bin']=np.sign(out['ret'])
if 'side' in events_:out.loc[out['ret']<=0,'bin']=0 # meta-labeling
return out
# =======================================================
# Dropping Unnecessary Labels [3.8]
def dropLabels(events, minPct=.05):
# apply weights, drop labels with insufficient examples
while True:
df0=events['bin'].value_counts(normalize=True)
if df0.min()>minPct or df0.shape[0]<3:break
        print('dropped label: ', df0.idxmin(),df0.min())
        events=events[events['bin']!=df0.idxmin()]
return events
# =======================================================
# Linear Partitions [20.4.1]
def linParts(numAtoms,numThreads):
# partition of atoms with a single loop
parts=np.linspace(0,numAtoms,min(numThreads,numAtoms)+1)
parts=np.ceil(parts).astype(int)
return parts
def nestedParts(numAtoms,numThreads,upperTriang=False):
# partition of atoms with an inner loop
parts,numThreads_=[0],min(numThreads,numAtoms)
for num in range(numThreads_):
part=1+4*(parts[-1]**2+parts[-1]+numAtoms*(numAtoms+1.)/numThreads_)
part=(-1+part**.5)/2.
parts.append(part)
parts=np.round(parts).astype(int)
if upperTriang: # the first rows are heaviest
parts=np.cumsum(np.diff(parts)[::-1])
parts=np.append(np.array([0]),parts)
return parts
# =======================================================
# multiprocessing snippet [20.7]
def mpPandasObj(func,pdObj,numThreads=24,mpBatches=1,linMols=True,**kargs):
'''
Parallelize jobs, return a dataframe or series
+ func: function to be parallelized. Returns a DataFrame
+ pdObj[0]: Name of argument used to pass the molecule
+ pdObj[1]: List of atoms that will be grouped into molecules
+ kwds: any other argument needed by func
Example: df1=mpPandasObj(func,('molecule',df0.index),24,**kwds)
'''
import pandas as pd
#if linMols:parts=linParts(len(argList[1]),numThreads*mpBatches)
#else:parts=nestedParts(len(argList[1]),numThreads*mpBatches)
if linMols:parts=linParts(len(pdObj[1]),numThreads*mpBatches)
else:parts=nestedParts(len(pdObj[1]),numThreads*mpBatches)
jobs=[]
for i in range(1,len(parts)):
job={pdObj[0]:pdObj[1][parts[i-1]:parts[i]],'func':func}
job.update(kargs)
jobs.append(job)
if numThreads==1:out=processJobs_(jobs)
else: out=processJobs(jobs,numThreads=numThreads)
if isinstance(out[0],pd.DataFrame):df0=pd.DataFrame()
elif isinstance(out[0],pd.Series):df0=pd.Series()
else:return out
for i in out:df0=df0.append(i)
df0=df0.sort_index()
return df0
# =======================================================
# single-thread execution for debugging [20.8]
def processJobs_(jobs):
# Run jobs sequentially, for debugging
out=[]
for job in jobs:
out_=expandCall(job)
out.append(out_)
return out
# =======================================================
# Example of async call to multiprocessing lib [20.9]
import multiprocessing as mp
import datetime as dt
#________________________________
def reportProgress(jobNum,numJobs,time0,task):
# Report progress as asynch jobs are completed
msg=[float(jobNum)/numJobs, (time.time()-time0)/60.]
msg.append(msg[1]*(1/msg[0]-1))
timeStamp=str(dt.datetime.fromtimestamp(time.time()))
msg=timeStamp+' '+str(round(msg[0]*100,2))+'% '+task+' done after '+ \
str(round(msg[1],2))+' minutes. Remaining '+str(round(msg[2],2))+' minutes.'
if jobNum<numJobs:sys.stderr.write(msg+'\r')
else:sys.stderr.write(msg+'\n')
return
#________________________________
def processJobs(jobs,task=None,numThreads=24):
# Run in parallel.
# jobs must contain a 'func' callback, for expandCall
if task is None:task=jobs[0]['func'].__name__
pool=mp.Pool(processes=numThreads)
outputs,out,time0=pool.imap_unordered(expandCall,jobs),[],time.time()
# Process asyn output, report progress
for i,out_ in enumerate(outputs,1):
out.append(out_)
reportProgress(i,len(jobs),time0,task)
pool.close();pool.join() # this is needed to prevent memory leaks
return out
# =======================================================
# Unwrapping the Callback [20.10]
def expandCall(kargs):
# Expand the arguments of a callback function, kargs['func']
func=kargs['func']
del kargs['func']
out=func(**kargs)
return out
# =======================================================
# Pickle Unpickling Objects [20.11]
def _pickle_method(method):
func_name=method.im_func.__name__
obj=method.im_self
cls=method.im_class
return _unpickle_method, (func_name,obj,cls)
#________________________________
def _unpickle_method(func_name,obj,cls):
for cls in cls.mro():
try:func=cls.__dict__[func_name]
except KeyError:pass
else:break
return func.__get__(obj,cls)
#________________________________
# =======================================================
# Estimating uniqueness of a label [4.1]
def mpNumCoEvents(closeIdx,t1,molecule):
'''
Compute the number of concurrent events per bar.
+molecule[0] is the date of the first event on which the weight will be computed
+molecule[-1] is the date of the last event on which the weight will be computed
    Any event that starts before t1[molecule].max() impacts the count.
'''
#1) find events that span the period [molecule[0],molecule[-1]]
t1=t1.fillna(closeIdx[-1]) # unclosed events still must impact other weights
t1=t1[t1>=molecule[0]] # events that end at or after molecule[0]
t1=t1.loc[:t1[molecule].max()] # events that start at or before t1[molecule].max()
#2) count events spanning a bar
iloc=closeIdx.searchsorted(np.array([t1.index[0],t1.max()]))
count=pd.Series(0,index=closeIdx[iloc[0]:iloc[1]+1])
for tIn,tOut in t1.iteritems():count.loc[tIn:tOut]+=1.
return count.loc[molecule[0]:t1[molecule].max()]
# =======================================================
# Estimating the average uniqueness of a label [4.2]
def mpSampleTW(t1,numCoEvents,molecule):
# Derive avg. uniqueness over the events lifespan
wght=pd.Series(index=molecule)
for tIn,tOut in t1.loc[wght.index].iteritems():
wght.loc[tIn]=(1./numCoEvents.loc[tIn:tOut]).mean()
return wght
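# Hedged sketch of how the two helpers above are usually chained (via the
# mpPandasObj engine defined later in this file); `events` and `close` are
# assumed inputs carrying a 't1' column and a price series respectively.
def _example_average_uniqueness(events, close, numThreads=1):
    numCoEvents = mpPandasObj(mpNumCoEvents, ('molecule', events.index), numThreads,
                              closeIdx=close.index, t1=events['t1'])
    numCoEvents = numCoEvents.loc[~numCoEvents.index.duplicated(keep='last')]
    numCoEvents = numCoEvents.reindex(close.index).fillna(0)
    tW = mpPandasObj(mpSampleTW, ('molecule', events.index), numThreads,
                     t1=events['t1'], numCoEvents=numCoEvents)
    return tW  # average uniqueness of each label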
# =======================================================
# Sequential Bootstrap [4.5.2]
## Build Indicator Matrix [4.3]
def getIndMatrix(barIx,t1):
# Get Indicator matrix
indM=(pd.DataFrame(0,index=barIx,columns=range(t1.shape[0])))
for i,(t0,t1) in enumerate(t1.iteritems()):indM.loc[t0:t1,i]=1.
return indM
# =======================================================
# Compute average uniqueness [4.4]
def getAvgUniqueness(indM):
# Average uniqueness from indicator matrix
c=indM.sum(axis=1) # concurrency
u=indM.div(c,axis=0) # uniqueness
avgU=u[u>0].mean() # avg. uniqueness
return avgU
# =======================================================
# return sample from sequential bootstrap [4.5]
def seqBootstrap(indM,sLength=None):
# Generate a sample via sequential bootstrap
if sLength is None:sLength=indM.shape[1]
phi=[]
while len(phi)<sLength:
avgU=pd.Series()
for i in indM:
indM_=indM[phi+[i]] # reduce indM
avgU.loc[i]=getAvgUniqueness(indM_).iloc[-1]
prob=avgU/avgU.sum() # draw prob
phi+=[np.random.choice(indM.columns,p=prob)]
return phi
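# Tiny illustrative sketch of the sequential bootstrap on a hand-made indicator
# matrix; the values follow the style of the source's toy example and are not data.
def _example_seqBootstrap():
    t1 = pd.Series([2, 3, 5], index=[0, 2, 4])   # label end-times, indexed by start
    barIx = range(t1.max() + 1)                  # bar index
    indM = getIndMatrix(barIx, t1)
    return seqBootstrap(indM)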
# =======================================================
# Determination of sample weight by absolute return attribution [4.10]
def mpSampleW(t1,numCoEvents,close,molecule):
# Derive sample weight by return attribution
ret=np.log(close).diff() # log-returns, so that they are additive
wght=pd.Series(index=molecule)
for tIn,tOut in t1.loc[wght.index].iteritems():
wght.loc[tIn]=(ret.loc[tIn:tOut]/numCoEvents.loc[tIn:tOut]).sum()
return wght.abs()
# =======================================================
# fractionally differentiated features snippets
# =======================================================
# get weights
def getWeights(d,size):
    # full weight vector of length `size` for differencing order d (no thresholding here)
w=[1.]
for k in range(1,size):
w_ = -w[-1]/k*(d-k+1)
w.append(w_)
w=np.array(w[::-1]).reshape(-1,1)
return w
def getWeights_FFD(d,thres):
w,k=[1.],1
while True:
w_=-w[-1]/k*(d-k+1)
if abs(w_)<thres:break
w.append(w_);k+=1
return np.array(w[::-1]).reshape(-1,1)
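# Quick sketch showing how the fixed-width-window weights are truncated by
# `thres`; d=0.4 and thres=1e-4 are illustrative values only.
def _example_ffd_weights():
    w = getWeights_FFD(0.4, 1e-4)
    return len(w), w.ravel()[-5:]   # window length and the last few weights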
# =======================================================
# expanding window fractional differentiation
def fracDiff(series, d, thres=0.01):
'''
Increasing width window, with treatment of NaNs
Note 1: For thres=1, nothing is skipped
Note 2: d can be any positive fractional, not necessarily
bounded between [0,1]
'''
#1) Compute weights for the longest series
w=getWeights(d, series.shape[0])
#2) Determine initial calcs to be skipped based on weight-loss threshold
w_=np.cumsum(abs(w))
w_ /= w_[-1]
skip = w_[w_>thres].shape[0]
#3) Apply weights to values
df={}
for name in series.columns:
seriesF, df_=series[[name]].fillna(method='ffill').dropna(), pd.Series()
for iloc in range(skip, seriesF.shape[0]):
loc=seriesF.index[iloc]
if not np.isfinite(series.loc[loc,name]).any():continue # exclude NAs
try:
df_.loc[loc]=np.dot(w[-(iloc+1):,:].T, seriesF.loc[:loc])[0,0]
except:
continue
df[name]=df_.copy(deep=True)
df=pd.concat(df,axis=1)
return df
# =======================================================
# fixed-width window fractional differentiation
def fracDiff_FFD(series,d,thres=1e-5):
# Constant width window (new solution)
w = getWeights_FFD(d,thres)
width = len(w)-1
df={}
for name in series.columns:
seriesF, df_=series[[name]].fillna(method='ffill').dropna(), pd.Series()
for iloc1 in range(width,seriesF.shape[0]):
loc0,loc1=seriesF.index[iloc1-width], seriesF.index[iloc1]
test_val = series.loc[loc1,name] # must resample if duplicate index
if isinstance(test_val, (pd.Series, pd.DataFrame)):
test_val = test_val.resample('1m').mean()
if not np.isfinite(test_val).any(): continue # exclude NAs
try:
df_.loc[loc1]=np.dot(w.T, seriesF.loc[loc0:loc1])[0,0]
except:
continue
df[name]=df_.copy(deep=True)
df=pd.concat(df,axis=1)
return df
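# Hedged usage sketch: fractionally differentiate log prices with the fixed-width
# window above; the 'close' column name and d=0.4 are assumptions.
def _example_fracDiff_FFD(df_prices):
    return fracDiff_FFD(np.log(df_prices[['close']]), d=0.4, thres=1e-5)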
"""
def fracDiff_FFD(series,d,thres=1e-5):
'''
Constant width window (new solution)
Note 1: thres determines the cut-off weight for the window
Note 2: d can be any positive fractional, not necessarily
bounded [0,1].
'''
#1) Compute weights for the longest series
w=getWeights_FFD(d, thres) ## WHERE IS THIS FUNCTION IN THE BOOK
width=len(w)-1
#2) Apply weights to values
df={}
for name in series.columns:
seriesF, df_=series[[name]].fillna(method='ffill').dropna(), pd.Series()
for iloc1 in range(width,seriesF.shape[0]):
loc0,loc1=seriesF.index[iloc1-width], seriesF.index[iloc1]
if not np.isfinite(series.loc[loc1,name]): continue # exclude NAs
df_.loc[loc1]=np.dot(w.T, seriesF.loc[loc0:loc1])[0,0]
df[name]=df_.copy(deep=True)
df=pd.concat(df,axis=1)
return df
"""
# =======================================================
# finding the min. D value that passes ADF test
def plotMinFFD(df0, thres=1e-5):
# pg. 85
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
out=pd.DataFrame(columns=['adfStat','pVal','lags','nObs','95% conf','corr'])
for d in np.linspace(0,1,11):
df1=np.log(df0[['close']]).resample('1D').last() # downcast to daily obs
df2=fracDiff_FFD(df1,d,thres=thres)
corr=np.corrcoef(df1.loc[df2.index,'close'],df2['close'])[0,1]
df2=adfuller(df2['close'],maxlag=1,regression='c',autolag=None)
out.loc[d]=list(df2[:4])+[df2[4]['5%']]+[corr] # with critical value
f,ax=plt.subplots(figsize=(9,5))
out[['adfStat','corr']].plot(ax=ax, secondary_y='adfStat')
plt.axhline(out['95% conf'].mean(),linewidth=1,color='r',linestyle='dotted')
return out
# =======================================================
# Modeling snippets
# =======================================================
# =======================================================
# Purging observations in the training set (7.1)
def getTrainTimes(t1,testTimes):
"""
Given testTimes, find the times of the training observations
-t1.index: Time when the observation started
-t1.value: Time when the observation ended
-testTimes: Times of testing observations
"""
trn=t1.copy(deep=True)
for i,j in testTimes.iteritems():
df0=trn[(i<=trn.index)&(trn.index<=j)].index # train starts within test
df1=trn[(i<=trn)&(trn<=j)].index # train ends within test
df2=trn[(trn.index<=i)&(j<=trn)].index # train envelops test
trn=trn.drop(df0.union(df1).union(df2))
return trn
# =======================================================
# Embargo on Training Observations (7.2)
def getEmbargoTimes(times,pctEmbargo):
# Get embargo time for each bar
step=int(times.shape[0]*pctEmbargo)
if step==0:
mbrg=pd.Series(times,index=times)
else:
mbrg=pd.Series(times[step:],index=times[:-step])
mbrg=mbrg.append(pd.Series(times[-1],index=times[-step:]))
return mbrg
## Examples
# testtimes=pd.Series(mbrg[dt1],index=[dt0]) # include embargo before purge
# trainTimes=getTrainTimes(t1,testTimes)
# testTimes=t1.loc[dt0:dt1].index
# =======================================================
# Cross-validation class when observations overlap (7.3)
from sklearn.model_selection._split import _BaseKFold
class PurgedKFold(_BaseKFold):
"""
Extend KFold class to work with labels that span intervals
The train is purged of observations overlapping test-label intervals
Test set is assumed contiguous (shuffle=False), w/o training samples in between
"""
def __init__(self,n_splits=3,t1=None,pctEmbargo=0.):
if not isinstance(t1,pd.Series):
raise ValueError('Label Through Dates must be a pd.Series')
super(PurgedKFold,self).__init__(n_splits,shuffle=False,random_state=None)
self.t1=t1
self.pctEmbargo=pctEmbargo
def split(self,X,y=None,groups=None):
if (X.index==self.t1.index).sum()!=len(self.t1):
raise ValueError('X and ThruDateValues must have the same index')
# TODO: grouping function combinations insert here??
# manage groups by using label in dataframe?
# use combinations + group label to split into chunks??
indices=np.arange(X.shape[0])
mbrg=int(X.shape[0]*self.pctEmbargo)
test_starts=[
(i[0],i[-1]+1) for i in np.array_split(np.arange(X.shape[0]),
self.n_splits)
]
for i,j in test_starts:
t0=self.t1.index[i] # start of test set
test_indices=indices[i:j]
maxT1Idx=self.t1.index.searchsorted(self.t1[test_indices].max())
train_indices=self.t1.index.searchsorted(self.t1[self.t1<=t0].index)
if maxT1Idx<X.shape[0]: # right train ( with embargo)
train_indices=np.concatenate((train_indices, indices[maxT1Idx+mbrg:]))
yield train_indices,test_indices
# =======================================================
# CV score implements purgedKfold & embargo (7.4)
def cvScore(clf,X,y,sample_weight,scoring='neg_log_loss',
t1=None,cv=None,cvGen=None,pctEmbargo=None):
if scoring not in ['neg_log_loss','accuracy']:
raise Exception('wrong scoring method.')
from sklearn.metrics import log_loss,accuracy_score
idx = pd.IndexSlice
if cvGen is None:
cvGen=PurgedKFold(n_splits=cv,t1=t1,pctEmbargo=pctEmbargo) # purged
score=[]
for train,test in cvGen.split(X=X):
fit=clf.fit(X=X.iloc[idx[train],:],y=y.iloc[idx[train]],
sample_weight=sample_weight.iloc[idx[train]].values)
if scoring=='neg_log_loss':
prob=fit.predict_proba(X.iloc[idx[test],:])
score_=-log_loss(y.iloc[idx[test]], prob,
sample_weight=sample_weight.iloc[idx[test]].values,
labels=clf.classes_)
else:
pred=fit.predict(X.iloc[idx[test],:])
score_=accuracy_score(y.iloc[idx[test]],pred,
sample_weight=sample_weight.iloc[idx[test]].values)
score.append(score_)
return np.array(score)
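# Illustrative sketch of running the purged, embargoed CV score defined above;
# the classifier and the fold/embargo settings are demonstration choices only.
def _example_cvScore(X, cont):
    # `cont` is assumed to carry 'bin' labels, 'w' sample weights and 't1' end times
    from sklearn.ensemble import RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=100)
    return cvScore(clf, X=X, y=cont['bin'], sample_weight=cont['w'],
                   scoring='accuracy', t1=cont['t1'], cv=3, pctEmbargo=0.01)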
# =======================================================
# Plot ROC-AUC for purgedKFold
def crossValPlot(skf,classifier,X,y):
"""Code adapted from:
sklearn crossval example
"""
from itertools import cycle
from sklearn.metrics import roc_curve, auc
    from numpy import interp
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
idx = pd.IndexSlice
f,ax = plt.subplots(figsize=(10,7))
i = 0
for train, test in skf.split(X, y):
probas_ = (classifier.fit(X.iloc[idx[train]], y.iloc[idx[train]])
.predict_proba(X.iloc[idx[test]]))
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y.iloc[idx[test]], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
ax.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set_xlim([-0.05, 1.05])
ax.set_ylim([-0.05, 1.05])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_title('Receiver operating characteristic example')
ax.legend(bbox_to_anchor=(1,1))
#=======================================================
# Feature Importance snippets
#=======================================================
#=======================================================
# 8.2 Mean Decrease Impurity (MDI)
def featImpMDI(fit,featNames):
# feat importance based on IS mean impurity reduction
# only works with tree based classifiers
df0={i:tree.feature_importances_ for i,tree
in enumerate(fit.estimators_)}
df0=pd.DataFrame.from_dict(df0,orient='index')
df0.columns=featNames
df0=df0.replace(0,np.nan) # b/c max_features=1
imp=(pd.concat({'mean':df0.mean(),
'std':df0.std()*df0.shape[0]**-0.5},
axis=1))
imp/=imp['mean'].sum()
return imp
#=======================================================
# 8.3 Mean Decrease Accuracy (MDA)
def featImpMDA(clf,X,y,cv,sample_weight,t1,pctEmbargo,scoring='neg_log_loss'):
    # feat importance based on OOS score reduction
if scoring not in ['neg_log_loss','accuracy']:
raise ValueError('wrong scoring method.')
from sklearn.metrics import log_loss, accuracy_score
cvGen=PurgedKFold(n_splits=cv,t1=t1,pctEmbargo=pctEmbargo) # purged cv
    scr0,scr1=pd.Series(), pd.DataFrame(columns=X.columns)
for i,(train,test) in enumerate(cvGen.split(X=X)):
X0,y0,w0=X.iloc[train,:],y.iloc[train],sample_weight.iloc[train]
X1,y1,w1=X.iloc[test,:],y.iloc[test],sample_weight.iloc[test]
fit=clf.fit(X=X0,y=y0,sample_weight=w0.values)
if scoring=='neg_log_loss':
prob=fit.predict_proba(X1)
scr0.loc[i]=-log_loss(y1,prob,sample_weight=w1.values,
labels=clf.classes_)
else:
pred=fit.predict(X1)
scr0.loc[i]=accuracy_score(y1,pred,sample_weight=w1.values)
for j in X.columns:
X1_=X1.copy(deep=True)
np.random.shuffle(X1_[j].values) # permutation of a single column
if scoring=='neg_log_loss':
prob=fit.predict_proba(X1_)
scr1.loc[i,j]=-log_loss(y1,prob,sample_weight=w1.values,
labels=clf.classes_)
else:
pred=fit.predict(X1_)
scr1.loc[i,j]=accuracy_score(y1,pred,sample_weight=w1.values)
imp=(-scr1).add(scr0,axis=0)
if scoring=='neg_log_loss':imp=imp/-scr1
else: imp=imp/(1.-scr1)
imp=(pd.concat({'mean':imp.mean(),
'std':imp.std()*imp.shape[0]**-0.5},
axis=1))
return imp,scr0.mean()
#=======================================================
# 8.4 Single Feature Importance (SFI)
def auxFeatImpSFI(featNames,clf,trnsX,cont,scoring,cvGen):
imp=pd.DataFrame(columns=['mean','std'])
for featName in featNames:
df0=cvScore(clf,X=trnsX[[featName]],y=cont['bin'],
sample_weight=cont['w'],scoring=scoring,cvGen=cvGen)
imp.loc[featName,'mean']=df0.mean()
imp.loc[featName,'std']=df0.std()*df0.shape[0]**-0.5
return imp
#=======================================================
# 8.5 Computation of Orthogonal Features
def get_eVec(dot,varThres):
# compute eVec from dot proc matrix, reduce dimension
eVal,eVec=np.linalg.eigh(dot)
    idx=eVal.argsort()[::-1] # arguments for sorting eVal desc.
eVal,eVec=eVal[idx],eVec[:,idx]
#2) only positive eVals
eVal=(pd.Series(eVal,index=['PC_'+str(i+1)
for i in range(eVal.shape[0])]))
eVec=(pd.DataFrame(eVec,index=dot.index,columns=eVal.index))
eVec=eVec.loc[:,eVal.index]
#3) reduce dimension, form PCs
cumVar=eVal.cumsum()/eVal.sum()
dim=cumVar.values.searchsorted(varThres)
eVal,eVec=eVal.iloc[:dim+1],eVec.iloc[:,:dim+1]
return eVal,eVec
def orthoFeats(dfx,varThres=0.95):
# given a DataFrame, dfx, of features, compute orthofeatures dfP
dfZ=dfx.sub(dfx.mean(),axis=1).div(dfx.std(),axis=1) # standardize
dot=(pd.DataFrame(np.dot(dfZ.T,dfZ),
index=dfx.columns,
columns=dfx.columns))
eVal,eVec=get_eVec(dot,varThres)
dfP=np.dot(dfZ,eVec)
return dfP
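# Small sketch: build orthogonal (PCA) features from a random standardized
# feature matrix; the data below is purely illustrative.
def _example_orthoFeats():
    dfx = pd.DataFrame(np.random.randn(200, 10),
                       columns=['x_'+str(i) for i in range(10)])
    return orthoFeats(dfx, varThres=0.95)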
#=======================================================
# 8.6 Computation of weighted kendall's tau between feature importance and inverse PCA ranking
#from scipy.stats import weightedtau
#featImp=np.array([0.55,0.33,0.07,0.05]) # feature importance
#pcRank=np.array([1,2,3,4],dtype=np.float) # PCA rank
#weightedtau(featImp,pcRank**-1)[0]
#=======================================================
# 8.7 Creating a Synthetic Dataset
def getTestData(n_features=40,n_informative=10,n_redundant=10,n_samples=10_000):
# generate a random dataset for a classification problem
from sklearn.datasets import make_classification
    kwds=dict(n_samples=n_samples,n_features=n_features,
n_informative=n_informative,n_redundant=n_redundant,
random_state=0,shuffle=False)
trnsX,cont=make_classification(**kwds)
    df0=(pd.date_range(periods=n_samples, freq=pd.tseries.offsets.BDay(),
                       end=pd.Timestamp.today()))
trnsX,cont=(pd.DataFrame(trnsX,index=df0),
pd.Series(cont,index=df0).to_frame('bin'))
df0=['I_'+str(i) for i in range(n_informative)]+['R_'+str(i) for i in range(n_redundant)]
df0+=['N_'+str(i) for i in range(n_features-len(df0))]
trnsX.columns=df0
cont['w']=1./cont.shape[0]
cont['t1']=pd.Series(cont.index,index=cont.index)
return trnsX,cont
#=======================================================
# 8.8 Calling Feature Importance for Any Method
def featImportances(trnsX,cont,n_estimators=1000,cv=10,
max_samples=1.,numThreads=11,pctEmbargo=0,
scoring='accuracy',method='SFI',minWLeaf=0.,**kargs):
# feature importance from a random forest
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
#from mpEngine import mpPandasObj
n_jobs=(-1 if numThreads>1 else 1) # run 1 thread w/ ht_helper in dirac1
#1) prepare classifier,cv. max_features=1, to prevent masking
clf=DecisionTreeClassifier(criterion='entropy',max_features=1,
class_weight='balanced',
min_weight_fraction_leaf=minWLeaf)
clf=BaggingClassifier(base_estimator=clf,n_estimators=n_estimators,
max_features=1.,max_samples=max_samples,
oob_score=True,n_jobs=n_jobs)
fit=clf.fit(X=trnsX,y=cont['bin'],sample_weight=cont['w'].values)
oob=fit.oob_score_
if method=='MDI':
imp=featImpMDI(fit,featNames=trnsX.columns)
oos=cvScore(clf,X=trnsX,y=cont['bin'],cv=cv,sample_weight=cont['w'],
t1=cont['t1'],pctEmbargo=pctEmbargo,scoring=scoring).mean()
elif method=='MDA':
imp,oos=featImpMDA(clf,X=trnsX,y=cont['bin'],cv=cv,
sample_weight=cont['w'],t1=cont['t1'],
pctEmbargo=pctEmbargo,scoring=scoring)
elif method=='SFI':
cvGen=PurgedKFold(n_splits=cv,t1=cont['t1'],pctEmbargo=pctEmbargo)
oos=cvScore(clf,X=trnsX,y=cont['bin'],sample_weight=cont['w'],
scoring=scoring,cvGen=cvGen).mean()
clf.n_jobs=1 # parallelize auxFeatImpSFI rather than clf
        imp=mpPandasObj(auxFeatImpSFI,('featNames',trnsX.columns),numThreads,
clf=clf,trnsX=trnsX,cont=cont,scoring=scoring,cvGen=cvGen)
return imp,oob,oos
#=======================================================
# 8.9 Calling All Components
def testFunc(n_features=40,n_informative=10,n_redundant=10,n_estimators=1000,
n_samples=10000,cv=10):
    # test the performance of the feat importance functions on artificial data
    # Number of noise features = n_features-n_informative-n_redundant
    from itertools import product
    from pathlib import PurePath
    trnsX,cont=getTestData(n_features,n_informative,n_redundant,n_samples)
dict0={'minWLeaf':[0.],'scoring':['accuracy'],'method':['MDI','MDA','SFI'],
'max_samples':[1.]}
jobs,out=(dict(zip(dict0,i))for i in product(*dict0.values())),[]
kargs={'pathOut':PurePath(pdir/'testFunc').as_posix(),
'n_estimators':n_estimators,'tag':'testFunc','cv':cv}
for job in jobs:
job['simNum']=job['method']+'_'+job['scoring']+'_'+'%.2f'%job['minWLeaf']+\
'_'+str(job['max_samples'])
print(job['simNum'])
kargs.update(job)
        imp,oob,oos=featImportances(trnsX=trnsX,cont=cont,**kargs)
plotFeatImportance(imp=imp,oob=oob,oos=oos,**kargs)
df0=imp[['mean']]/imp['mean'].abs().sum()
df0['type']=[i[0] for i in df0.index]
df0=df0.groupby('type')['mean'].abs().sum()
df0.update({'oob':oob,'oos':oos});df0.update(job)
out.append(df0)
out=( | pd.DataFrame(out) | pandas.DataFrame |
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
MONTHS = ["january","february", "march", "april",\
"may","june","all"]
DAYS = ["monday","tuesday","wednesday","thursday",\
"friday","saturday","sunday","all"]
def get_user_input(input_variable, allowed_inputs):
"""
Asks user to specify an input_variable
among the list of allowed_inputs.
Returns:
(str) variable - value of the variable
"""
# Enter again flag
enter_again = False
keep_asking = True
while keep_asking or enter_again:
print("Enter variable - {}".format(input_variable))
print("Allowed values are - \n\t{}".format("\n\t".join(allowed_inputs)))
# Take user input
variable = input()
# Remove whitespace and change to lower case
variable = variable.strip().lower()
# Check if variable is in allowed_inputs
if variable in allowed_inputs:
enter_again = False
print("You selected: {} = {}".format(input_variable, variable))
choice_enter_again = input("Do you want to continue with this choice? Press y or Y to confirm. Press any other key to enter new value\n").strip().lower()
if choice_enter_again != "" and choice_enter_again[0]=='y':
keep_asking = False
else:
keep_asking = True
enter_again = True
else:
print("Invalid input. Try again.")
return variable
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = get_user_input("City", CITY_DATA.keys())
# TO DO: get user input for month (all, january, february, ... , june)
month = get_user_input("Month", MONTHS)
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = get_user_input("Weekday", DAYS)
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - pandas DataFrame containing city data filtered by month and day
"""
# load data file into a dataframe
df = pd.read_csv(CITY_DATA[city])
# convert the Start Time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# extract month and day of week from Start Time to create new columns
df['month'] = df['Start Time'].dt.month
df['day_of_week'] = df['Start Time'].dt.weekday_name
# filter by month if applicable
if month != 'all':
# use the index of the months list to get the corresponding int
months = ['january', 'february', 'march', 'april', 'may', 'june']
month = months.index(month)+1
# filter by month to create the new dataframe
df = df[df['month']==month]
# filter by day of week if applicable
if day != 'all':
# filter by day of week to create the new dataframe
df = df[df['day_of_week']==day.title()]
return df
def time_stats(df):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
# Convert 'Start Time' to the datetime datatype
df['Start Time'] = | pd.to_datetime(df['Start Time']) | pandas.to_datetime |
import streamlit as st
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from PIL import Image
from scrapping import DataFetcher
from models import Models
from seir import SeirModel
corona = Image.open('images/title_logo.png')
st.image(corona)
st.write('Bem-vindo ao dashboard de acompanhamento de casos do Coronavírus no Brasil.')
st.write('Os dados apresentados aqui são disponibilizados por meio de APIs públicas. Para ver as fontes, acesse o repositório do projeto no Github. A atualização dos dados acontece várias vezes ao dia. Apenas números confirmados são apresentados.')
st.write('Utilize o menu ao lado para fazer a navegação.')
st.write('A maioria dos gráficos apresentados aqui são interativos: mostram valores ao passar do mouse, permitem zoom e posição de tela cheia. Explore os dados com o mouse!')
st.sidebar.title('Navegação')
actions = ['Situação atual', 'Previsões']
choice = st.sidebar.selectbox('Selecione uma opção', actions)
with st.spinner('Buscando dados...'):
data = DataFetcher()
st.write('________________________________')
brazil_general_code, brazil_states_code, world_cases_code = data.get_apis_status_code()
if choice == 'Situação atual':
if brazil_general_code == 200:
date, time = data.get_update_time()
st.write('<i>Dados atualizados em </i>', date, ' <i>às</i> ', time, unsafe_allow_html=True)
st.text('')
st.text('')
total_cases, deaths, recovers = data.get_main_counters()
st.write('<b>Casos totais até o momento: </b>', total_cases, unsafe_allow_html=True)
st.write('<b>Mortes confirmadas: </b>', deaths, unsafe_allow_html=True)
st.write('<b>Pessoas recuperadas: </b>', recovers, unsafe_allow_html=True)
else:
st.write("Dados indisponíveis no momento...")
if world_cases_code == 200:
cases_df = data.get_cases_timeline()
fig_daily_cases = go.Figure(data=go.Bar(x=cases_df['date'], y=cases_df['daily']))
fig_daily_cases.update_layout(title={'text':'<b>Novos casos por dia</b>', 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
yaxis_title='Novos casos confirmados',
margin=dict(b=0, t=70))
st.plotly_chart(fig_daily_cases, use_container_width=True)
fig_cumulative_cases = go.Figure()
fig_cumulative_cases.add_trace(go.Scatter(x=cases_df['date'], y=cases_df['confirmed'],
line=dict(color='#17becf', width=5),
mode='lines+markers', marker=dict(size=8), name='Confirmados', fill='tozeroy'))
fig_cumulative_cases.add_trace(go.Scatter(x=cases_df['date'], y=cases_df['deaths'],
line=dict(color='firebrick', width=5),
mode='lines+markers', marker=dict(size=8), name='Mortes', fill='tozeroy'))
fig_cumulative_cases.update_layout(title={'text':'<b>Casos e mortes acumulado</b>', 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
yaxis_title='',
margin=dict(t=70), legend_orientation='h')
st.plotly_chart(fig_cumulative_cases, use_container_width=True)
else:
st.write("Dados indisponíveis no momento...")
if brazil_states_code == 200:
cases_by_state, _ = data.get_state_cases()
st.text('')
st.header('Situação dos estados')
### Using plotly table
fig_states_table = go.Figure(data=[go.Table(
columnwidth = [600,600],
header=dict(values=list(cases_by_state.columns),
fill_color='lightblue',
align='center'),
cells=dict(values=[cases_by_state['Estado'], cases_by_state['Casos Confirmados'],
cases_by_state['Mortes'], cases_by_state['Letalidade']],
fill_color='lavender',
align='center')
)
])
fig_states_table.update_layout(margin=dict(l=0, r=0, t=10, b=0))
st.plotly_chart(fig_states_table, use_container_width=True, config={'displayModeBar': False})
fig_state_cases = data.get_states_cases_plot()
st.plotly_chart(fig_state_cases, use_container_width=True)
else:
st.write("Dados indisponíveis no momento...")
if choice == 'Previsões':
st.sidebar.title('Selecione o modelo')
# Modelo auto-regressivo e SEIR necessitam de ser desenvolvidos!
model = st.sidebar.radio('', ['Exponencial e Polinomial', 'Rede Neural Artificial', 'SEIR (Simulação)'])
if model == 'Exponencial e Polinomial':
st.markdown('## Modelo Exponencial')
st.write('O modelo exponencial é indicado para modelar a epidemia nos seus estágios iniciais.')
st.write('Contudo, a análise da adequação da curva de casos ao modelo exponencial nos informa a respeito das medidas de contenção que estão sendo adotadas.')
st.write('Caso o ajuste ao modelo não seja satisfatório, significa que as medidas de contenção estão surtindo efeito em freiar a epidemia que, caso as medidas de contenção fossem inexistentes, teria seu número casos acompanhando a curva exponencial.')
st.write('Clique em "*compare data on hover*" para comparar a previsão e o real para cada dia.')
model = Models()
cases_last_20days, predictions, r2 = model.get_exponential_predictions()
cases_df = data.get_cases_timeline()
# Quality of last 10 days fitting to exponential model plot
fig_quality = go.Figure()
fig_quality.add_trace(go.Scatter(x=cases_last_20days['date'], y=cases_last_20days['log_cases'], line=dict(color='firebrick', width=4),
mode='lines+markers', marker=dict(size=10), name='Real'))
fig_quality.add_trace(go.Scatter(x=cases_last_20days['date'], y=np.log(predictions['pred_cases']), name='Ajustado'))
fig_quality.update_layout(title={'text': '<b>Qualidade do ajuste exponencial em janela de 20 dias</b><br>(R² = {})'.format(r2), 'x': 0.5, 'xanchor': 'center'},
yaxis_title='log (casos totais)', legend=dict(x=.1, y=0.9))
st.plotly_chart(fig_quality, use_container_width=True)
# Number of cases with predictions plot
fig_pred = go.Figure()
fig_pred.add_trace(go.Scatter(x=cases_df['date'], y=cases_df['confirmed'], line=dict(color='#7f7f7f', width=4),
mode='lines+markers', marker=dict(size=10), name='Dados'))
fig_pred.add_trace(go.Scatter(x=predictions['date'], y=predictions['pred_cases'], name='Ajuste', line=dict(color='red')))
fig_pred.update_layout(title={'text':'<b>Ajuste exponencial com previsão para os próximos 7 dias</b>', 'x': 0.5, 'xanchor': 'center'},
yaxis_title='Casos totais', legend=dict(x=.1, y=0.9))
st.plotly_chart(fig_pred, use_container_width=True)
st.markdown('## Modelo Polinomial')
st.write('''
O modelo polinomial não força a curva a um ajuste exponencial. Portanto, tem a característica de proporcionar um ajuste mais "suave",
capaz de captar as tendências mais recentes. Para este ajuste, está sendo utilizado um modelo polinomial de terceiro grau.
Clique em "*compare data on hover*" para comparar a previsão e o real para cada dia.
''')
polinomial_predictions = model.get_polinomial_predictions()
fig_pred_poli = go.Figure()
fig_pred_poli.add_trace(go.Scatter(x=cases_df['date'], y=cases_df['confirmed'], line=dict(color='#7f7f7f', width=4),
mode='lines+markers', marker=dict(size=10), name='Dados'))
fig_pred_poli.add_trace(go.Scatter(x=polinomial_predictions['date'], y=polinomial_predictions['pred_cases'], name='Ajuste', line=dict(color='green')))
fig_pred_poli.update_layout(title={'text':'<b>Ajuste polinomial com previsão para os próximos 7 dias</b>', 'x': 0.5, 'xanchor': 'center'},
yaxis_title='Casos totais', legend=dict(x=.1, y=0.9))
st.plotly_chart(fig_pred_poli, use_container_width=True)
if model == 'Rede Neural Artificial':
st.markdown('## Rede Neural Artificial')
st.write('Em desenvolvimento...')
if model == 'SEIR (Simulação)':
total_cases, deaths, recovers = data.get_main_counters()
st.markdown('## Modelo SEIR')
st.write(
'''
SEIR é um modelo comportamental em epidemiologia que busca modelar como uma doença se espalha através de uma população.
SEIR é um acrônimo para **S**usceptible, **E**xposed, **I**nfected, **R**ecovered, ou em português: Suscetíveis, Expostos, Infectados e Recuperados.
A ideia básica é que, quando uma doença é introduzida em uma população, as pessoas se movem de um estágio do modelo para o outro. Ou seja, as pessoas suscetíveis podem se expor ao vírus, contraí-lo e eventualmente se recuperar ou padecer.
''')
seir_image = Image.open('images/seir.png')
st.image(seir_image, use_column_width=True)
st.write(
'''
A modelagem leva em consideração três parâmetros principais: $\\alpha$, $\\beta$ e $\\gamma$.
* $\\alpha$ é o inverso do período de incubação do vírus. Tempo de incubação é o período em que o vírus fica no corpo da pessoa sem produzir sintomas.
* $\\beta$ é a taxa de contato médio na população. Este é o parâmetro influenciado por medidas de contenção social.
* $\\gamma$ é o inverso da média do período de infecção. Período de infecção é o tempo em que uma pessoa fica acometida pelo vírus e pode transmití-lo.
Para essa modelagem, o valor de cada parâmetro foi retirado de artigos publicados na área, especificamente:
* [Epidemic analysis of COVID-19 in China by dynamical modeling](https://arxiv.org/pdf/2002.06563.pdf)
* [Impact of non-pharmaceutical interventions (NPIs) to reduce COVID19 mortality and healthcare demand](https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf)
''')
st.markdown('### Como a modelagem foi feita para o Brasil')
st.write(
'''
Para o caso do Brasil, foi considerada uma população de 200 milhões de pessoas, sendo o número inicial de infectados
o número total de casos mais recentes que temos. Foi considerado que toda a população é suscetível e que, inicialmente,
o número de pessoas expostas (que contraíram o vírus mas estão em período de incubação) é 15 vezes o número de casos confirmados.
O fator 15 foi retirado de uma estimativa realizada em declarações do Ministério da Saúde.
A simulação sempre parte do cenário mais atual, ou seja, do dia de hoje considerando os números mais atualizados que temos.
O objetivo é tentar prever o cenário futuro baseado nos números mais recentes que temos e também demonstrar, neste cenário,
o impacto das medidas de isolamento social. Na simulação, o fator de contenção social foi levado em conta por meio do parâmetro *p*,
que possui valores entre 0 e 1. O valor de *p* = 1 seria o caso em que nenhuma medida de contenção social é adotada, ou seja, a vida cotinua normalmente.
O valor de *p* = 0 é apenas teórico, pois significaria zerar a taxa de transmissão do vírus, ou seja, absolutamente nenhuma transmissão entre a população.
A seguir é possível verificar os resultados da simulação para o cenário brasileiro, partindo de hoje e considerando os números mais recentes.
''')
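        # Hedged sketch of the classic SEIR rate equations described in the text
        # above; the project's SeirModel (seir.py) may differ in details such as
        # how the contact factor p scales beta.
        def _seir_derivatives_sketch(S, E, I, R, N, alpha, beta, gamma, p):
            dS = -p * beta * S * I / N
            dE = p * beta * S * I / N - alpha * E
            dI = alpha * E - gamma * I
            dR = gamma * I
            return dS, dE, dI, dR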
seir_model = SeirModel(100, total_cases, recovered=deaths+recovers, p=1)
S, E, I, R = seir_model.get_model_results()
population = seir_model.N
# Prepare dates for plotting
start = | pd.Timestamp('now') | pandas.Timestamp |
import pandas as pd
from pandas.testing import assert_frame_equal
from dtspec.core import markdown_to_df
# pylint: disable=redefined-outer-name
def test_convert_table_to_df():
given = """
| id | name |
| - | - |
| 1 | one |
| 2 | two |
| 3 | three |
"""
expected = pd.DataFrame({"id": ["1", "2", "3"], "name": ["one", "two", "three"]})
actual = markdown_to_df(given)
assert_frame_equal(actual, expected)
def test_convert_table_to_df_with_blanks():
given = """
| id | name |
| - | - |
| 1 | one |
| 2 | |
| 3 | three |
"""
expected = pd.DataFrame({"id": ["1", "2", "3"], "name": ["one", "", "three"]})
actual = markdown_to_df(given)
assert_frame_equal(actual, expected)
def test_ignores_trailing_comments():
given = """
| id | name |
| - | - |
| 1 | one |
| 2 | two | # Some comment
| 3 | three |
"""
expected = pd.DataFrame({"id": ["1", "2", "3"], "name": ["one", "two", "three"]})
actual = markdown_to_df(given)
assert_frame_equal(actual, expected)
def test_honors_embedded_octothorpes():
given = """
| id | name |
| - | - |
| 1 | one |
| 2 | #2 |
| 3 | three |
"""
expected = | pd.DataFrame({"id": ["1", "2", "3"], "name": ["one", "#2", "three"]}) | pandas.DataFrame |
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
_testing as tm,
concat,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
pytestmark = pytest.mark.single
def test_format_type(setup_path):
df = DataFrame({"A": [1, 2]})
with ensure_clean_path(setup_path) as path:
with HDFStore(path) as store:
store.put("a", df, format="fixed")
store.put("b", df, format="table")
assert store.get_storer("a").format_type == "fixed"
assert store.get_storer("b").format_type == "table"
def test_format_kwarg_in_constructor(setup_path):
# GH 13291
msg = "format is not a defined argument for HDFStore"
with tm.ensure_clean(setup_path) as path:
with pytest.raises(ValueError, match=msg):
HDFStore(path, format="table")
def test_api_default_format(setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_put(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError, match=msg):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError, match=msg):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(setup_path):
with ensure_clean_store(setup_path) as store:
index = Index([f"I am a very long string index: {i}" for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ [f"I am a very long string index: {i}" for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
msg = "Compression not supported on Fixed format stores"
with pytest.raises(ValueError, match=msg):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(setup_path):
df = tm.makeTimeDataFrame()
with | ensure_clean_store(setup_path) | pandas.tests.io.pytables.common.ensure_clean_store |
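
# Hedged aside, not part of the test module above: the pd.set_option(...) /
# pd.set_option(..., None) pairs in test_api_default_format can also be scoped
# with pd.option_context, which restores "io.hdf.default_format" automatically
# when the block exits. The helper name below is illustrative only.
import pandas as pd


def _write_with_table_default_sketch(path, df):
    # Inside this block to_hdf()/put() default to format="table"; the previous
    # option value is restored on exit, so no manual reset is needed.
    with pd.option_context("io.hdf.default_format", "table"):
        df.to_hdf(path, "df_table_default")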
from datetime import date
from flask import Flask, render_template, request, g
import json
import os
import socket
import pandas as pd
from redis import Redis
app = Flask(__name__)
redisurl = os.getenv('REDIS_URL')
hostname = socket.gethostname()
# Button Colour
buttoncolour = "blue"
button = './static/{}.png'.format(buttoncolour)
def get_redis():
if not hasattr(g, 'redis'):
g.redis = Redis(host=redisurl, db=0, socket_timeout=5, decode_responses=True)
return g.redis
def generate_table():
redis = get_redis()
result = pd.DataFrame(columns=['Date', 'Clicks'])
keys = redis.keys('*')
for key in keys:
val = redis.get(key)
raw = json.dumps([{'Date': key, 'Clicks': val}])
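        # Each Redis key/value pair becomes a one-record JSON list here, which
        # pd.read_json below parses into a single-row DataFrame for that date.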
df = | pd.read_json(raw) | pandas.read_json |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed, but you almost always don't want to do it
            # (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed, but you almost always don't want to do it
                # (tables.NaturalNameWarning)
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
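            # stack() moves the column level into the innermost index level;
            # dropping it below leaves a Series indexed by the original (C, B) levels.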
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
        # test a different ordering but with more fields (like an invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
            def check_col(key, name, size):
                assert (
                    getattr(store.get_storer(key).table.description, name).itemsize
                    == size
                )
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
            # fully named levels
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
            # check creation/storage/retrieval of float32 (a bit hacky to
            # actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# NaN not in the first position: floats with NaN are ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and a where clause past the end of the range (selects nothing)
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as failed"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_start_stop_table(self, setup_path):
with ensure_clean_store(setup_path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append("df", df)
result = store.select("df", "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ["A"]]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ["A"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self, setup_path):
# GH 16209
with ensure_clean_store(setup_path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple(
{"selector": ["foo"], "data": None}, df, selector="selector"
)
result = store.select_as_multiple(
["selector", "data"], selector="selector", start=0, stop=1
)
expected = df.loc[[0], ["foo", "bar"]]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self, setup_path):
with ensure_clean_store(setup_path) as store:
# fixed, GH 8287
df = DataFrame(
dict(A=np.random.rand(20), B=np.random.rand(20)),
index=pd.date_range("20130101", periods=20),
)
store.put("df", df)
result = store.select("df", start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select("df", start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select("df", start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put("s", s)
result = store.select("s", start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select("s", start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
df.index = ["{c:3d}".format(c=c) for c in df.index]
df.columns = ["{c:3d}".format(c=c) for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
crit = "columns=df.columns[:75]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = "columns=df.columns[:75:2]"
result = store.select("frame", [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop, setup_path):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame(
{
"a": Series([20111010, 20111011, 20111012]),
"b": Series(["ab", "cd", "ab"]),
}
)
with ensure_clean_store(setup_path) as store:
store.append("test_dataset", df)
result = store.select("test_dataset", start=start, stop=stop)
tm.assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self, setup_path):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, "df"), lambda p: pd.read_hdf(p, "df")
)
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self, setup_path):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, "df")
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, "df")
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, path, compression=False, **kwargs):
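# Round-trip helper: write `obj` to a fresh store, read it back and compare with `comparator` (optionally using compression).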
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(
self, obj, comparator, path, compression=False, **kwargs
):
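# Like _check_roundtrip, but also re-stores the retrieved object and checks that a second write/read cycle still compares equal.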
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, path, compression=False):
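# Same as _check_roundtrip, but writes the object with format="table" via store.put.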
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
def test_multiple_open_close(self, setup_path):
# gh-4409: open & close multiple times
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
# single
store = HDFStore(path)
assert "CLOSED" not in store.info()
assert store.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
with ensure_clean_path(setup_path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
with pytest.raises(ValueError):
HDFStore(path)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert "CLOSED" not in store1.info()
assert "CLOSED" not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert "CLOSED" in store1.info()
assert not store1.is_open
assert "CLOSED" not in store2.info()
assert store2.is_open
store2.close()
assert "CLOSED" in store1.info()
assert "CLOSED" in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store2.append("df2", df)
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode="w")
store.append("df", df)
store2 = HDFStore(path)
store.close()
assert "CLOSED" in store.info()
assert not store.is_open
store2.close()
assert "CLOSED" in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", mode="w", format="table")
store = HDFStore(path)
store.close()
with pytest.raises(ClosedFileError):
store.keys()
with pytest.raises(ClosedFileError):
"df" in store
with pytest.raises(ClosedFileError):
len(store)
with pytest.raises(ClosedFileError):
store["df"]
with pytest.raises(AttributeError):
store.df
with pytest.raises(ClosedFileError):
store.select("df")
with pytest.raises(ClosedFileError):
store.get("df")
with pytest.raises(ClosedFileError):
store.append("df2", df)
with pytest.raises(ClosedFileError):
store.put("df3", df)
with pytest.raises(ClosedFileError):
store.get_storer("df2")
with pytest.raises(ClosedFileError):
store.remove("df2")
with pytest.raises(ClosedFileError, match="file is not open"):
store.select("df")
def test_pytables_native_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
) as store:
d2 = store["detector/readout"]
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(
is_platform_windows(), reason="native2 read fails oddly on windows"
)
def test_pytables_native2_read(self, datapath, setup_path):
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
) as store:
str(store)
d1 = store["detector"]
assert isinstance(d1, DataFrame)
@td.xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath, setup_path):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
) as store:
result = store.select("df")
expected = pd.DataFrame(
[[1, 2, 3, "D"]],
columns=["A", "B", "C", "D"],
index=pd.Index(["ABC"], name="INDEX_NAME"),
)
tm.assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath, setup_path):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
) as store:
result = store.select("table")
expected = pd.DataFrame({"a": ["a", "b"], "b": [2, 3]})
tm.assert_frame_equal(expected, result)
def test_copy(self, setup_path):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None, propindexes=True, **kwargs):
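# Copy the HDF store at `f` to `new_f` (a temp file by default) and verify that keys, table row counts and (when propindexes=True) propagated indexes all match.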
try:
store = HDFStore(f, "r")
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs
)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(setup_path)
st = HDFStore(path)
st.append("df", df, data_columns=["A"])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self, setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
def test_tseries_indices_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self, setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
def test_unicode_index(self, setup_path):
unicode_values = ["\u03c3", "\u03c3\u03c3"]
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_unicode_longer_encoded(self, setup_path):
# GH 11234
char = "\u0394"
df = | pd.DataFrame({"A": [char]}) | pandas.DataFrame |
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# Script designed to visulize the working time distribution from your time logger.
# Copyright © 2020 <NAME>
# Licensed under MIT License
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
f_name = sys.argv[1]
precision = str(sys.argv[2])
df = | pd.read_csv(f_name) | pandas.read_csv |
from collections import defaultdict
import pandas as pd
prefixs = defaultdict(int)
# https://www.kaggle.com/imoore/titanic-the-only-notebook-you-need-to-see/notebook
personal_titles = set(t.lower() for t in ["Mr.",
# "Miss.", != Miss
"Mrs.",
])
def extract_feature(X):
# Int64Index: 1309 entries, 0 to 417
# Data columns (total 12 columns):
# # Column Non-Null Count Dtype
# --- ------ -------------- -----
# 0 PassengerId 1309 non-null int64
# 1 Survived 891 non-null float64
# 2 Pclass 1309 non-null int64
# 3 Name 1309 non-null object
# 4 Sex 1309 non-null object
# 5 Age 1046 non-null float64
# 6 SibSp 1309 non-null int64
# 7 Parch 1309 non-null int64
# 8 Ticket 1309 non-null object
# 9 Fare 1308 non-null float64
# 10 Cabin 295 non-null object
# 11 Embarked 1307 non-null object
# Name ...
X['NameWordCount'] = X['Name'].apply(lambda x: len(x.split()))
X['Personal Title'] = X['Name'].apply(extract_personal_title)
# For numeric data, "Age" missing 20% records, "Fare" missing only 1 record.
# Age of couple might be close
# Name, Age, Ticket
# Clark, Mr. <NAME> 27 13508
# Clark, Mrs. <NAME> (<NAME>) 26 13508
X['Age'] = X['Age'].fillna(X['Age'].median())
X['Fare'] = X['Fare'].fillna(X['Fare'].median())
# For categorical data, "Embarked" missing 2 records, "Cabin" missing 77% records.
# Most of "Embarked" are "S"
# Do NOT turn it into integer, which performs bad ...
# X['Embarked'] = X['Embarked'].fillna('S')
X['CabinInitChar'] = X['Cabin'].fillna(
'').apply(extract_cabin_initial_char)
X['CabinNumber'] = X['Cabin'].fillna('').apply(extract_cabin_number)
X['TicketPrefix'] = X['Ticket'].fillna('').apply(extract_ticket_prefix)
X['TicketNumber'] = X['Ticket'].fillna('').apply(extract_ticket_number)
X = X.drop(columns=['Name', 'Ticket', 'Cabin'])
return | pd.get_dummies(X) | pandas.get_dummies |
__author__ = "saeedamen" # <NAME>
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on a "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import numpy as np
import pandas as pd
import datetime
from datetime import timedelta
from pandas.tseries.offsets import BDay, CustomBusinessDay, Day, \
CustomBusinessMonthEnd, DateOffset
from findatapy.timeseries.timezone import Timezone
from findatapy.util.dataconstants import DataConstants
from findatapy.util.loggermanager import LoggerManager
constants = DataConstants()
# To speed up CustomBusinessDay
# https://stackoverflow.com/questions/31523302/performance-of-pandas-custom-business-day-offset
class Calendar(object):
"""Provides calendar based functions for working out options expiries,
holidays etc. Note that, at present, the expiry calculations are
approximate.
"""
# Approximate mapping from tenor to number of business days
_tenor_bus_day_dict = {'ON': 1,
'TN': 2,
'1W': 5,
'2W': 10,
'3W': 15,
'1M': 20,
'2M': 40,
'3M': 60,
'4M': 80,
'6M': 120,
'9M': 180,
'1Y': 252,
'2Y': 252 * 2,
'3Y': 252 * 3,
'5Y': 252 * 5
}
def __init__(self):
self._holiday_df = pd.read_parquet(constants.holidays_parquet_table)
def flatten_list_of_lists(self, list_of_lists):
"""Flattens lists of obj, into a single list of strings (rather than
characters, which is default behavior).
Parameters
----------
list_of_lists : obj (list)
List to be flattened
Returns
-------
str (list)
"""
if isinstance(list_of_lists, list):
rt = []
for i in list_of_lists:
if isinstance(i, list):
rt.extend(self.flatten_list_of_lists(i))
else:
rt.append(i)
return rt
return list_of_lists
def _get_full_cal(self, cal):
holidays_list = []
# Calendars which have been hardcoded in the parquet file (which users
# may also edit)
if len(cal) == 6:
# Eg. EURUSD (load EUR and USD calendars and combine the holidays)
holidays_list.append(
[self._get_full_cal(cal[0:3]), self._get_full_cal(cal[3:6])])
elif len(cal) == 9:
holidays_list.append(
[self._get_full_cal(cal[0:3]), self._get_full_cal(cal[3:6]),
self._get_full_cal(cal[6:9])])
else:
if cal == 'FX' or cal == 'NYX':
# Filter for Christmas & New Year's Day
for i in range(1999, 2025):
holidays_list.append(pd.Timestamp(str(i) + "-12-25"))
holidays_list.append(pd.Timestamp(str(i) + "-01-01"))
elif cal == 'NYD' or cal == 'NEWYEARSDAY':
# Filter for New Year's Day
for i in range(1999, 2025):
holidays_list.append(pd.Timestamp(str(i) + "-01-01"))
elif cal == 'WDY' or cal == 'WEEKDAY':
bday = CustomBusinessDay(weekmask='Sat Sun')
holidays_list.append([x for x in pd.date_range('01 Jan 1999',
'31 Dec 2025',
freq=bday)])
elif cal == 'WKD': #
pass
# holidays_list.append()
else:
label = cal + ".holiday-dates"
try:
holidays_list = self._holiday_df[label].dropna().tolist()
except:
logger = LoggerManager().getLogger(__name__)
logger.warning(cal + " holiday calendar not found.")
return holidays_list
def create_calendar_bus_days(self, start_date, end_date, cal='FX'):
"""Creates a calendar of business days
Parameters
----------
start_date : DateTime
start date of calendar
end_date : DataFrame
finish date of calendar
cal : str
business calendar to use
Returns
-------
list
"""
hols = self.get_holidays(start_date=start_date, end_date=end_date,
cal=cal)
return pd.bdate_range(start=start_date, end=end_date, freq='D',
holidays=hols)
def get_holidays(self, start_date=None, end_date=None, cal='FX',
holidays_list=[]):
"""Gets the holidays for a given calendar
Parameters
----------
start_date : DateTime
start date of calendar
end_date : DataFrame
finish date of calendar
cal : str
business calendar to use
Returns
-------
list
"""
# holidays_list , = []
# TODO use Pandas CustomBusinessDays to get more calendars
holidays_list = self._get_full_cal(cal)
# .append(lst)
# Use 'set' so we don't have duplicate dates if we are incorporating
# multiple calendars
holidays_list = np.array(
list(set(self.flatten_list_of_lists(holidays_list))))
holidays_list = pd.to_datetime(
holidays_list).sort_values().tz_localize('UTC')
# Floor start date
if start_date is not None:
start_date = pd.Timestamp(start_date).floor('D')
try:
start_date = start_date.tz_localize('UTC')
except:
pass
holidays_list = holidays_list[(holidays_list >= start_date)]
if end_date is not None:
# Ceiling end date
end_date = pd.Timestamp(end_date).ceil('D')
try:
end_date = end_date.tz_localize('UTC')
except:
pass
holidays_list = holidays_list[(holidays_list <= end_date)]
# Remove all weekends unless it is WEEKDAY calendar
if cal != 'WEEKDAY' and cal != 'WKY':
holidays_list = holidays_list[holidays_list.dayofweek <= 4]
return holidays_list
def get_business_days_tenor(self, tenor):
if tenor in self._tenor_bus_day_dict.keys():
return self._tenor_bus_day_dict[tenor]
return None
def get_dates_from_tenors(self, start, end, tenor, cal=None):
freq = str(self.get_business_days_tenor(tenor)) + "B"
return pd.DataFrame(index=pd.bdate_range(start, end, freq=freq))
def get_delta_between_dates(self, date1, date2, unit='days'):
if unit == 'days':
return (date2 - date1).days
def get_delivery_date_from_horizon_date(self, horizon_date, tenor,
cal=None, asset_class='fx'):
if 'fx' in asset_class:
tenor_unit = ''.join(re.compile(r'\D+').findall(tenor))
asset_holidays = self.get_holidays(cal=cal)
if tenor_unit == 'ON':
return horizon_date + CustomBusinessDay(n=1,
holidays=asset_holidays)
elif tenor_unit == 'TN':
return horizon_date + CustomBusinessDay(n=2,
holidays=asset_holidays)
elif tenor_unit == 'SP':
pass
elif tenor_unit == 'SN':
tenor_unit = 'D'
tenor_digit = 1
else:
tenor_digit = int(''.join(re.compile(r'\d+').findall(tenor)))
horizon_date = self.get_spot_date_from_horizon_date(
horizon_date, cal, asset_holidays=asset_holidays)
if 'SP' in tenor_unit:
return horizon_date
elif tenor_unit == 'D':
return horizon_date + CustomBusinessDay(
n=tenor_digit, holidays=asset_holidays)
elif tenor_unit == 'W':
return horizon_date + Day(
n=tenor_digit * 7) + CustomBusinessDay(
n=0, holidays=asset_holidays)
else:
if tenor_unit == 'Y':
tenor_digit = tenor_digit * 12
horizon_period_end = horizon_date + CustomBusinessMonthEnd(
tenor_digit + 1)
horizon_floating = horizon_date + DateOffset(
months=tenor_digit)
cbd = CustomBusinessDay(n=1, holidays=asset_holidays)
delivery_date = []
if isinstance(horizon_period_end, pd.Timestamp):
horizon_period_end = [horizon_period_end]
if isinstance(horizon_floating, pd.Timestamp):
horizon_floating = [horizon_floating]
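# Roughly a modified-following style adjustment (assumed intent): take the
# month-shifted date rolled onto a business day, unless it would fall beyond
# the capped business month-end, in which case use the month-end instead.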
for period_end, floating in zip(horizon_period_end,
horizon_floating):
if floating < period_end:
delivery_date.append(floating - cbd + cbd)
else:
delivery_date.append(period_end)
return pd.DatetimeIndex(delivery_date)
def get_expiry_date_from_horizon_date(self, horizon_date, tenor, cal=None,
asset_class='fx-vol'):
"""Calculates the expiry date of FX options, based on the horizon date,
the tenor and the holiday calendar associated with the asset.
Uses expiry rules from <NAME>'s FX option pricing book
Parameters
----------
horizon_date : pd.Timestamp (collection)
Horizon date of contract
tenor : str
Tenor of the contract
cal : str
Holiday calendar (usually related to the asset)
asset_class : str
'fx-vol' - FX options (default)
Returns
-------
pd.Timestamp (collection)
"""
if asset_class == 'fx-vol':
tenor_unit = ''.join(re.compile(r'\D+').findall(tenor))
asset_holidays = self.get_holidays(cal=cal)
if tenor_unit == 'ON':
tenor_digit = 1;
tenor_unit = 'D'
else:
tenor_digit = int(''.join(re.compile(r'\d+').findall(tenor)))
if tenor_unit == 'D':
return horizon_date + CustomBusinessDay(
n=tenor_digit, holidays=asset_holidays)
elif tenor_unit == 'W':
return horizon_date + Day(
n=tenor_digit * 7) + CustomBusinessDay(
n=0, holidays=asset_holidays)
else:
horizon_date = self.get_spot_date_from_horizon_date(
horizon_date, cal, asset_holidays=asset_holidays)
if tenor_unit == 'M':
pass
elif tenor_unit == 'Y':
tenor_digit = tenor_digit * 12
cbd = CustomBusinessDay(n=1, holidays=asset_holidays)
horizon_period_end = horizon_date + CustomBusinessMonthEnd(
tenor_digit + 1)
horizon_floating = horizon_date + DateOffset(
months=tenor_digit)
delivery_date = []
if isinstance(horizon_period_end, pd.Timestamp):
horizon_period_end = [horizon_period_end]
if isinstance(horizon_floating, pd.Timestamp):
horizon_floating = [horizon_floating]
# TODO: double check this!
for period_end, floating in zip(horizon_period_end,
horizon_floating):
if floating < period_end:
delivery_date.append(floating - cbd + cbd)
else:
delivery_date.append(period_end)
delivery_date = | pd.DatetimeIndex(delivery_date) | pandas.DatetimeIndex |
import pandas as pd
from utils.storage import load_frame, dump_frame, DATA_PATH, check_if_stepframe, check_if_vecframe
def daySplitter(step_name, data_path=DATA_PATH):
"""
Splits entries into days and saves results as vecframe.
"""
stepframe = load_frame(step_name, data_path)
check_if_stepframe(stepframe)
vec_len = stepframe.loc[stepframe.day == 0].shape[0]
columns = ['user', 'desc'] + list(range(vec_len))
vfs = []
for day in stepframe.day.unique():
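# Columns 4 onwards are assumed to hold the per-user step series; transposing
# turns each user into one row (vector) for this day.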
vf = stepframe[stepframe.day == day].iloc[:, 4:999+4].T.astype('int32')
vf.columns = list(range(vec_len))
vf['user'] = vf.index.to_numpy(dtype=pd.np.int)
vf['desc'] = day
vfs.append(vf)
vecframe = pd.concat(vfs, sort=False, ignore_index=True)
vecframe = vecframe[columns]
vecframe.columns = vecframe.columns.astype(str)
check_if_vecframe(vecframe)
dump_frame(vecframe, '{}_dsp'.format(step_name))
def make_weekly_vecframe(step_name, vec_name='{}_week', data_path=DATA_PATH):
'''
Transforms a stepframe into a vecframe without splitting the data.
'desc' will always be 0.
:param step_name: name of the stepframe
:param vec_name: name under which vecframe will be saved
:param data_path: optional, path to data folder
:return:
'''
stepframe = load_frame(step_name, DATA_PATH)
vecframe = stepframe.loc[:, '0':].transpose()
vecframe.columns = [str(col) for col in vecframe.columns]
vecframe['user'] = vecframe.index
vecframe['user'] = vecframe['user'].apply(int)
vecframe['desc'] = [0] * vecframe.shape[0]
cols = list(vecframe.columns)
vecframe = vecframe[cols[-2:] + cols[:-2]]
# vecframe = vecframe.reset_index(drop=True)
check_if_vecframe(vecframe)
dump_frame(vecframe, vec_name.format(step_name), data_path)
def processTime(epochsfile, step_name , data_path=DATA_PATH):
epochs= | pd.read_csv(data_path+ epochsfile) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def perform_pca(data, n_components=3, y=None):
"""
data = Data on which to perform PCA
n_components = 3. This is the number of Principal Components to
consider for the analysis
"""
# Separating and standardizing the features
features = data.copy(deep=True)
if (y is not None):
features = data.drop(y, axis=1)
x = StandardScaler().fit_transform(features)
pca = PCA(n_components=n_components)
principalComponents = pca.fit_transform(x)
principal_comp = pd.DataFrame(
data=principalComponents,
columns=['Principal Component '
+ str(i) for i in range(1, (n_components+1))])
if (y is not None):
final_df = | pd.concat([principal_comp, data[y]], axis=1) | pandas.concat |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gps_building_blocks.ml.statistical_inference.inference."""
from unittest import mock
from absl.testing import parameterized
import numpy as np
import pandas as pd
from scipy import stats
from sklearn import datasets
from sklearn import model_selection
from absl.testing import absltest
from gps_building_blocks.ml.statistical_inference import data_preparation
class InferenceTest(parameterized.TestCase):
_missing_data = pd.DataFrame(
data=[[np.nan, 0.0000],
[0.6000, 0.0000],
[0.4000, 3.0000],
[0.2000, np.nan]],
columns=['first', 'second'])
def test_missing_value_emits_warning_twice(self):
with self.assertWarns(data_preparation.MissingValueWarning):
data_preparation.InferenceData(self._missing_data)
with self.assertWarns(data_preparation.MissingValueWarning):
data_preparation.InferenceData(self._missing_data)
def test_check_data_raises_exception_on_missing_data(self):
inference_data = data_preparation.InferenceData(self._missing_data)
with self.assertRaises(data_preparation.MissingValueError):
inference_data.data_check(raise_on_error=True)
def test_invalid_target_column_raise_exception(self):
with self.assertRaises(KeyError):
data_preparation.InferenceData(
initial_data=self._missing_data,
target_column='non_ci_sono')
def test_impute_missing_values_replaced_with_mean(self):
inference_data = data_preparation.InferenceData(self._missing_data)
expected_result = pd.DataFrame(
data=[[0.4000, 0.0000],
[0.6000, 0.0000],
[0.4000, 3.0000],
[0.2000, 1.0000]],
columns=['first', 'second'])
result = inference_data.impute_missing_values(strategy='mean')
pd.testing.assert_frame_equal(result, expected_result)
def test_fixed_effect_raise_exception_on_categorical_covariate(self):
data = pd.DataFrame(
data=[['0', 0.0, '1', 3.0],
['1', 0.0, '2', 2.0],
['1', 1.0, '3', 2.0],
['1', 1.0, '4', 1.0]],
columns=['control_1', 'control_2', 'variable_1', 'variable_2'],
index=['group1', 'group2', 'group3', 'group3'])
inference_data = data_preparation.InferenceData(data)
with self.assertRaises(data_preparation.CategoricalCovariateError):
inference_data.control_with_fixed_effect(
strategy='quick',
control_columns=['control_1', 'control_2'],
min_frequency=1)
def test_fixed_effect_demeaning_subtract_mean_in_groups(self):
data = pd.DataFrame(
data=[['0', 0.0, 1, 3.0],
['1', 0.0, 2, 2.0],
['1', 1.0, 3, 2.0],
['1', 1.0, 4, 1.0]],
columns=['control_1', 'control_2', 'variable_1', 'variable_2'],
index=['group1', 'group2', 'group3', 'group3'])
expected_result = pd.DataFrame(
data=[['0', 0.0, 2.5, 2.0],
['1', 0.0, 2.5, 2.0],
['1', 1.0, 2.0, 2.5],
['1', 1.0, 3.0, 1.5]],
columns=data.columns,
index=data.index).set_index(['control_1', 'control_2'], append=True)
inference_data = data_preparation.InferenceData(data)
result = inference_data.control_with_fixed_effect(
strategy='quick',
control_columns=['control_1', 'control_2'],
min_frequency=1)
| pd.testing.assert_frame_equal(result, expected_result) | pandas.testing.assert_frame_equal |
from flask import Flask
from flask import jsonify, send_from_directory
from flask import request
from flask import current_app
from flask import make_response
from flask import render_template
from flask_pymongo import PyMongo
import xsg
import pymortar
import pandas as pd
import pendulum
import toml
import pytz
import json
import glob
import os
from bson import json_util
from collections import defaultdict
from functools import update_wrapper
from datetime import datetime, timedelta
from dashutil import get_start, generate_months, prevmonday, get_today
app = Flask(__name__, static_url_path='/static')
config = toml.load('config.toml')
TZ = pytz.timezone('US/Pacific')
client = pymortar.Client({
'mortar_address': config['Mortar']['url'],
'username': config['Mortar']['username'],
'password': config['<PASSWORD>']['password'],
})
sites = [config['Dashboard']['sitename']]
# MongoDB configurations
app.config['MONGO_DBNAME'] = 'modes'
app.config["MONGO_URI"] = "mongodb://localhost:27017/modes"
mongo = PyMongo(app)
# Push default modes to mongodb once script starts
INITIALIZED = False
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
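# Example usage sketch (hypothetical route, mirroring the real handlers below):
# @app.route('/api/ping')
# @crossdomain(origin='*', methods=['GET'])
# def ping():
#     return jsonify({'ok': True})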
def state_to_string(state):
if state == 0:
return 'off'
elif state == 1:
return 'heat stage 1'
elif state == 2:
return 'cool stage 1'
elif state == 4:
return 'heat stage 2'
elif state == 5:
return 'cool stage 2'
else:
return 'unknown'
def dofetch(views, dataframes, start=None, end=None):
timeparams = None
if start is not None and end is not None:
timeparams=pymortar.TimeParams(
start=start.isoformat(),
end=end.isoformat(),
)
req = pymortar.FetchRequest(
sites=sites,
views=views,
dataFrames=dataframes,
time=timeparams
)
return client.fetch(req)
meter_view = pymortar.View(
name="meters",
definition="""SELECT ?meter WHERE {
?meter rdf:type brick:Building_Electric_Meter
};""",
)
meter_df = pymortar.DataFrame(
name="meters",
aggregation=pymortar.MEAN,
timeseries=[
pymortar.Timeseries(
view="meters",
dataVars=['?meter'],
)
]
)
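# The "meters" view selects every Building_Electric_Meter point; meter_df then
# requests a mean-aggregated timeseries for those points (the window is set on
# meter_df per request before each fetch).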
tstats_view = pymortar.View(
name="tstats",
definition="""SELECT ?rtu ?zone ?tstat ?csp ?hsp ?temp ?state WHERE {
?rtu rdf:type brick:RTU .
?tstat bf:controls ?rtu .
?rtu bf:feeds ?zone .
?tstat bf:hasPoint ?temp .
?temp rdf:type/rdfs:subClassOf* brick:Temperature_Sensor .
?tstat bf:hasPoint ?csp .
?csp rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Heating_Setpoint .
?tstat bf:hasPoint ?hsp .
?hsp rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Cooling_Setpoint .
?tstat bf:hasPoint ?state .
?state rdf:type brick:Thermostat_Status .
};""",
)
tstats_df = pymortar.DataFrame(
name="tstats",
aggregation=pymortar.MAX,
timeseries=[
pymortar.Timeseries(
view="tstats",
dataVars=['?csp','?hsp','?temp','?state'],
),
]
)
room_temp_view = pymortar.View(
name="room_temp",
definition="""SELECT ?zone ?room ?sensor WHERE {
?zone rdf:type brick:HVAC_Zone .
?zone bf:hasPart ?room .
?sensor rdf:type/rdfs:subClassOf* brick:Temperature_Sensor .
?room bf:hasPoint ?sensor .
};""",
)
weather_view = pymortar.View(
name="weather_temp",
definition="""SELECT ?sensor WHERE {
?sensor rdf:type/rdfs:subClassOf* brick:Weather_Temperature_Sensor .
};""",
)
weather_df = pymortar.DataFrame(
name="weather_temp",
aggregation=pymortar.MEAN,
window='15m',
timeseries=[
pymortar.Timeseries(
view="weather_temp",
dataVars=['?sensor'],
)
],
)
# Home page is requesting /api/power/day/in/15m
@app.route('/api/power/<last>/in/<bucketsize>')
@crossdomain(origin='*')
def power_summary(last, bucketsize):
# first, determine the start date from the 'last' argument
start_date = get_start(last)
if last == 'year' and bucketsize == 'month':
ranges = generate_months(get_today().month - 1)
readings = []
times = []
for t0, t1 in ranges:
meter_df.window = '{0}d'.format((t0-t1).days)
res = dofetch([meter_view], [meter_df], t1, t0)
times.append(t1.tz_convert(TZ).timestamp()*1000)
readings.append(res['meters'].fillna('myNullVal').values[0][0])
# print('power_summary(): ', dict(zip(times, readings)))
return jsonify({'readings': dict(zip(times, readings))})
# otherwise,
meter_df.window = bucketsize
print('start_date', start_date)
res = dofetch([meter_view], [meter_df], start_date, datetime.now(TZ))
# print('res: \n', res['meters'])
res['meters'].columns = ['readings']
# print('power_summary(): ', res['meters'].tz_convert(TZ).fillna('myNullVal'))
return res['meters'].tz_convert(TZ).fillna('myNullVal').to_json()
# Home page is requesting /api/energy/year/in/month & /api/energy/month/in/1d
@app.route('/api/energy/<last>/in/<bucketsize>')
@crossdomain(origin='*')
def energy_summary(last, bucketsize):
start_date = get_start(last)
if last == 'year' and bucketsize == 'month':
ranges = generate_months(get_today().month - 1)
readings = []
times = []
for t0, t1 in ranges:
meter_df.window = '15m'
res = dofetch([meter_view], [meter_df], t1, t0)
df = res['meters'].copy()
df.columns = ['readings']
df /= 4. # divide by 4 to get 15min (kW) -> kWh
times.append(pd.to_datetime(t1.isoformat()))
readings.append(df['readings'].sum())
df = | pd.DataFrame(readings, index=times, columns=['readings']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
'''
Script for grabbing a csv from a Google Sheet id and outputting JSON [compatible with Timeline JS](https://timeline.knightlab.com/docs/json-format.html)
Todo:
- Support for slide types
- Support for eras
- Support for scale
Usage: Update gsheet_id with Google Sheet ID below and run script
'''
gsheet_id = '1Tb0kabLp4K8lP-s3wWqU5QwRo0RufcT-OmeHH-h7oP4'
import csv, json
import pandas as pd
from sys import argv
'''
Constants
'''
gsheet_url = ["https://docs.google.com/spreadsheet/pub?key=","&output=csv"]
tjs_json = {}
start_date = ['s_year','s_month','s_day','s_time']
end_date = ['e_year','e_month','e_day','e_time']
display_date = ['display_date']
media = ["url","credit","caption","thumbnail"]
text = ["headline","text"]
slide = ["type", "group"]
background = ["background"]
date = ["year", "month", "day", "time","display_date"]
time = ["hour","minute","second","millisecond"]
'''
Return Timeline JS objects
'''
def to_object(data,param):
out_object = {}
for i, col in enumerate(param):
# handle background object
if param == background:
if data[i] != data[i]: pass
elif data[i].find('http') > -1: out_object['url'] = data[i]
else: out_object['color'] = data[i]
# handle NaN
elif not data.get(i) or data[i] != data[i]: out_object[col] = ''
# handle floats
elif type(data[i]) == float: out_object[col] = int(data[i])
# default case
else: out_object[col] = data[i]
# return time object
if out_object.get('time'):
out_time = out_object['time'].split(':')
for i,col in enumerate(out_time):
out_object[time[i]] = int(out_time[i])
out_object.pop('time',None)
return out_object
'''
Parse Google Sheet CSV
'''
def parse_csv(gsheet_id, tjs_json):
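# The published sheet is fetched as CSV through Google's spreadsheet/pub
# endpoint and its columns are renamed to the Timeline JS field groups
# defined above.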
data = pd.read_csv(gsheet_url[0] + gsheet_id + gsheet_url[1], header=0, \
names = start_date + end_date + display_date + text + media + slide + background)
df = | pd.DataFrame(data) | pandas.DataFrame |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = | TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx') | pandas.TimedeltaIndex |
import os
import shutil
#import re
import sys
import platform
import subprocess
import numpy as np
import json
import pickle
import pandas as pd
from pandas import Series
import xml.etree.ElementTree as ET
import glob
import argparse
try:
import lvdb
except:
import pdb as lvdb
print('using pdb instead of lvdb')
pass
def ensure_dir_exists (datadir):
if not os.path.exists(datadir):
os.makedirs(datadir)
if not os.path.exists(datadir):
themessage = 'Directory {} could not be created.'.format(datadir)
if (int(platform.python_version()[0]) > 2):
raise NotADirectoryError(themessage)
else:
# python 2 doesn't have the impressive exception vocabulary 3 does
# so just raising a generic exception with a useful description
raise BaseException(themessage)
def rsync_the_file (from_location, to_location):
# Assuming that the responses for how platform.system() responds to
# different OSes given here are correct (though not assuming case):
# https://stackoverflow.com/questions/1854/python-what-os-am-i-running-on
if platform.system().lower() == 'windows':
print('Windows detected. The rsync command that is about to be', \
'executed assumes a Linux or Mac OS; no guarantee that it', \
'will work with Windows. Please be ready to transfer files', \
'via alternate means if necessary.')
subprocess.call(['rsync', '-vaPhz', from_location, to_location])
def df_to_pickle(thedf, thefilename):
thedf.to_pickle(thefilename);
def df_to_csv(thedf, thefilename):
thedf.to_csv(thefilename, index_label='index');
def df_to_json(thedf, thefilename):
thedf.to_json(thefilename, orient='records', double_precision = 10, force_ascii = True);
def glob2df(datadir, linecount, jobnum_list):
print(datadir)
thepaths = glob.iglob(datadir + '/*/')
results_dirs_used = []
df_list = []
progress_counter = 1000;
counter = 0;
for dirname in sorted(thepaths):
dirstructure = dirname.split('/')
lastdir = dirstructure[-1]
if '_job_' not in lastdir:
# handle trailing slash if present
lastdir = dirstructure[-2];
if '_job_' not in lastdir:
# something's wrong; skip this case
continue;
if '_task_' not in lastdir:
# something's wrong; skip this case
continue;
if 'latest' in lastdir:
continue;
filename = dirname + 'summary.csv'
if not os.path.isfile(filename):
print('No summary file at ', filename);
# no summary file means no results, unless results saved using a
# different mechanism, which is out of scope of this script
continue;
missionname = dirname + 'mission.xml'
if not os.path.isfile(missionname):
print('No mission file at ', missionname);
continue;
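# Result directories are assumed to be named '<prefix>_job_<J>_task_<T>';
# pull the job and task numbers out of the last path component.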
split_on_task = lastdir.split('_task_')
tasknum = int(split_on_task[-1])
jobnum = int(split_on_task[0].split('_job_',1)[1])
if jobnum_list and jobnum not in jobnum_list:
# lvdb.set_trace()
# print('Job {} not in list of jobs; skipping'.format(jobnum))
continue;
counter += 1;
if counter > progress_counter:
print('j ', jobnum, ', t ', tasknum)
counter = 0;
# thisjob_df = pd.DataFrame(index=range(1))
thisjob_df = pd.read_csv(filename)
if thisjob_df.empty:
# no actual content in df; maybe only header rows
continue;
# Add column to df for job number
thisjob_df['job_num']=jobnum
# and task number
thisjob_df['task_num']=tasknum
# and results directory
thisjob_df['results_dir']=lastdir
# add how many rows there are in the df so plot scripts know what to
# expect
thisjob_df['num_rows']=len(thisjob_df.index)
df_to_append = pd.DataFrame()
thisjob_params_df = xml_param_df_cols(missionname);
num_lines = len(thisjob_df.index)
if linecount > 0:
if num_lines < linecount:
continue;
df_to_append = pd.concat([thisjob_params_df]*num_lines, ignore_index=True);
if df_to_append.empty:
continue;
this_job_df = thisjob_df
if not df_to_append.empty:
this_job_df = | pd.concat([thisjob_df, df_to_append], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # 2019-01-21: Initial Data Exploration
#
# ### Authors
# * <NAME> (<EMAIL>)
# ## Preparations
# In[1]:
# --- Imports
# Standard libraries
import os
import re
# External packages
import matplotlib.pyplot as plt
import numpy
import pandas
# In[2]:
# --- Configuration Parameters
# Data directory
data_dir = os.environ['DATA_DIR']
# Materials
materials = {
'actinolite': 0,
'alunite': 1,
'calcite': 2,
}
# ### Data Preparation
# In[3]:
# --- Load data from files
# Get file list
data_files = [os.path.join(data_dir, file_name) for file_name in os.listdir(data_dir)
if not file_name.startswith('.') and
os.path.isfile(os.path.join(data_dir, file_name))]
# Initialize spectra dataset
spectra_data = | pandas.DataFrame() | pandas.DataFrame |
# http://github.com/timestocome
# take a look at the differences in daily returns for recent bull and bear markets
# http://afoysal.blogspot.com/2016/08/arma-and-arima-timeseries-prediction.html
# predictions appear to increase and decrease with actual returns but scale is much smaller
# of course if it was this easy there'd be a lot of rich statisticians in the world.
import numpy as np
import pandas as pd
from statsmodels.tsa.arima_model import ARMA, ARIMA
from statsmodels.tsa.stattools import adfuller, arma_order_select_ic
import matplotlib.pyplot as plt
# pandas display options
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 25
pd.options.display.width = 1000
######################################################################
# data
########################################################################
# read in datafile created in LoadAndMatchDates.py
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)
features = [data.columns.values]
# create target --- let's try Nasdaq value 1 day change
data['returns'] = (data['NASDAQ'] - data['NASDAQ'].shift(1)) / data['NASDAQ']
# remove nan row from target creation
data = data.dropna()
#########################################################################
# split into bear and bull markets
##########################################################################
bull1_start = pd.to_datetime('01-01-1990') # beginning of this dataset
bull1_end = pd.to_datetime('07-16-1990')
iraq_bear_start = pd.to_datetime('07-17-1990')
iraq_bear_end = pd.to_datetime('10-11-1990')
bull2_start = pd.to_datetime('10-12-1990')
bull2_end = pd.to_datetime('01-13-2000')
dotcom_bear_start = pd.to_datetime('01-14-2000')
dotcom_bear_end = | pd.to_datetime('10-09-2002') | pandas.to_datetime |
import warnings
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
class TestPairwise:
# GH 7738
df1s = [
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", "C"]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1.0, 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0.0, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", 1]),
DataFrame([[2.0, 4.0], [1.0, 2.0], [5.0, 2.0], [8.0, 1.0]], columns=[1, 0.0]),
DataFrame([[2, 4.0], [1, 2.0], [5, 2.0], [8, 1.0]], columns=[0, 1.0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.0]], columns=[1.0, "X"]),
]
df2 = DataFrame(
[[None, 1, 1], [None, 1, 2], [None, 3, 2], [None, 8, 1]],
columns=["Y", "Z", "X"],
)
s = Series([1, 1, 3, 8])
def compare(self, result, expected):
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, f):
# DataFrame methods (which do not call _flex_binary_moment())
results = [f(df) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.columns)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().cov(pairwise=True),
lambda x: x.expanding().corr(pairwise=True),
lambda x: x.rolling(window=3).cov(pairwise=True),
lambda x: x.rolling(window=3).corr(pairwise=True),
lambda x: x.ewm(com=3).cov(pairwise=True),
lambda x: x.ewm(com=3).corr(pairwise=True),
],
)
def test_pairwise_with_self(self, f):
# DataFrame with itself, pairwise=True
# note that we may construct the 1st level of the MI
# in a non-monotonic way, so compare accordingly
results = []
for i, df in enumerate(self.df1s):
result = f(df)
tm.assert_index_equal(result.index.levels[0], df.index, check_names=False)
tm.assert_numpy_array_equal(
| safe_sort(result.index.levels[1]) | pandas.core.algorithms.safe_sort |
import os
from io import BytesIO
import fastavro as fa
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.orc as orc
import pytest
import cudf
from cudf.tests.utils import assert_eq
if not os.environ.get("RUN_HDFS_TESTS"):
pytestmark = pytest.mark.skip("Env not configured to run HDFS tests")
basedir = "/tmp/test-hdfs"
host = "localhost" # hadoop hostname
port = 8020 # hadoop rpc port
@pytest.fixture
def hdfs(scope="module"):
# Default Rpc port can be 8020/9000 depending on the hdfs config
fs = pa.hdfs.connect(host=host, port=port)
try:
if not fs.exists(basedir):
fs.mkdir(basedir)
except pa.lib.ArrowIOError:
pytest.skip("hdfs config probably incorrect")
return fs
@pytest.fixture
def pdf(scope="module"):
df = pd.DataFrame()
df["Integer"] = np.array([2345, 11987, 9027, 9027])
df["Float"] = np.array([9.001, 8.343, 6, 2.781])
df["Integer2"] = np.array([2345, 106, 2088, 789277], dtype="uint64")
df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"])
df["Boolean"] = np.array([True, False, True, False])
return df
@pytest.mark.parametrize("test_url", [False, True])
def test_read_csv(tmpdir, pdf, hdfs, test_url):
fname = tmpdir.mkdir("csv").join("file.csv")
# Write to local file system
pdf.to_csv(fname)
# Read from local file system as buffer
with open(fname, mode="rb") as f:
buffer = BytesIO(f.read())
# Write to hdfs
hdfs.upload(basedir + "/file.csv", buffer)
if test_url:
hd_fpath = "hdfs://{}:{}{}/file.csv".format(host, port, basedir)
else:
hd_fpath = "hdfs://{}/file.csv".format(basedir)
got = cudf.read_csv(hd_fpath)
# Read pandas from byte buffer
with hdfs.open(basedir + "/file.csv") as f:
expect = | pd.read_csv(f) | pandas.read_csv |
from market import getMarket as gm
from market import getData as gd
from price import getPrice as gp
from dba import mysql as ms
from settings import config
import pandas as pd
import datetime
import time
# Insert market information
def insert_market():
# idList, name = gm.run()
idList = gd.get_id_list()
name = gd.get_name_list()
df = {
"market_id": idList,
"name": name,
}
df = pd.DataFrame(df)
# print(df)
ms.insert_market(config.config, df)
# Query the market list
def select_market():
db_data = ms.select_market(config.config, '山东')
for each in db_data:
print(each.name)
# Insert market price information
def insert_price():
year,month,day = CountDay()
MarketNull = []
List = gd.get_id_list()
for each in List:
url = gp.init(each, year, month, day)
        # Scrape the page and return the raw (uncleaned) fields
Vdate,Vmarket,Vname,Vlow,Vhigh = gp.getHtml(url)
        # If the result is empty, skip this market and continue the loop
if(len(Vname)<=0):
            print(f'No data for this market ID: {each}')
MarketNull.append(each)
continue
else:
            # Return the cleaned fields
date,mark,name,low,high = gp.ClearData(Vdate,Vmarket,Vname,Vlow,Vhigh)
            # Convert to a DataFrame
df = {
"date": date,
"market_name": mark,
"vegetable": name,
"low": low,
"high": high
}
df = pd.DataFrame(df)
# print(df)
ms.insert_price(config.config, df)
time.sleep(1)
    # Map the IDs that returned no data to their names and store them in the database
#insert_NULL_Market(MarketNull)
# Insert markets that have no data
def insert_NULL_Market(MarketNull):
NullName = []
nameList = gd.get_json()
for each in MarketNull:
for j in nameList:
if(each == j['ID']):
NullName.append(j['name'])
ef = {
'market_id': MarketNull,
'name': NullName
}
ef = pd.DataFrame(ef)
ms.insert_null_market(config.config, ef)
# Query prices
def select_vege_price(name, date):
data = ms.select_price(config.config, name, date)
for each in data:
        print(f'Market: {each.market_name}, lowest price: {each.low}, highest price: {each.high}')
def delete_null_market():
temp_id = []
have_id = []
null = ms.select_null_market(config.config)
for each in null:
temp_id.append(each.market_id)
market_id = gd.get_id_list()
    # Set difference
have_id = list(set(market_id).difference(set(temp_id)))
have_name = []
market_name = gd.get_json()
for each in market_name:
for j in have_id:
if(j == each['ID']):
have_name.append(each['name'])
df = {
"ID": have_id,
"name": have_name,
}
df = | pd.DataFrame(df) | pandas.DataFrame |
"""
TRANSFORM
Create relevant statistics about Google trends data
for streamlit_app.py
"""
import pandas as pd
import logging
import streamlit as st
def create_query_date(df):
""" Day format of query_timestamp """
df['query_date'] = pd.to_datetime(df.query_timestamp).dt.date
return df
def drop_duplicates(df):
""" Remove duplicates for anlaysis """
df_nodup = df.drop_duplicates(subset=['query', 'value', 'query_date', 'ranking'])
logging.info(f"Drop {len(df)-len(df_nodup)} duplicates")
return df_nodup
def trends_statistics(df):
"""Generate relevant statistics for a ranking category of Google trends (rising or top)"""
# time series indicator: t
df['t'] = df.groupby('query_date').ngroup()
# most recent period
t_max = df.t.max()
logging.debug(f"df dtypes after adding time series: {df.dtypes}")
# ranking
# absolute
df['rank_t'] = df.groupby('t').value.rank(method='first', ascending=False)
# rank in previous period (t-1)
df['rank_t-1'] = df.groupby('query').rank_t.shift()
# rank change from previous, t-1, to current period, t
df['rank_absolute_change'] = df.rank_t - df['rank_t-1']
    # winners and losers (ranking of absolute changes)
df['rank_absoulte_change_ranking'] = df.groupby(
't').rank_absolute_change.rank(method='first', ascending=False)
# percentile
df['rank_pct_t'] = df.groupby('t').value.rank(
method='first', ascending=False, pct=True)
df['rank_pct_t-1'] = df.groupby('query').rank_pct_t.shift()
df['rank_pct_change'] = df.rank_pct_t - df['rank_pct_t-1']
# new entries at time t
df['new_entry_t'] = (pd.isna(df['rank_t-1']) & | pd.notnull(df.rank_t) | pandas.notnull |
import numpy as np
import pandas as pd
from sklearn.utils.estimator_checks import check_estimator
from autofeat import AutoFeatRegressor, AutoFeatClassifier
def get_random_data(seed=15):
# generate some toy data
np.random.seed(seed)
x1 = np.random.rand(1000)
x2 = np.random.randn(1000)
x3 = np.random.rand(1000)
target = 2 + 15*x1 + 3/(x2 - 1/x3) + 5*(x2 + np.log(x1))**3
X = np.vstack([x1, x2, x3]).T
return X, target
def test_do_almost_nothing():
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_steps=0, featsel_runs=0)
df = afreg.fit_transform(pd.DataFrame(X, columns=["x1", "x2", "x3"]), target)
assert list(df.columns) == ["x1", "x2", "x3"], "Only original columns"
df = afreg.transform(pd.DataFrame(X, columns=["x1", "x2", "x3"]))
assert list(df.columns) == ["x1", "x2", "x3"], "Only original columns"
def test_regular_X_y():
# autofeat with numpy arrays
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_steps=3)
df = afreg.fit_transform(X, target)
assert afreg.score(X, target) >= 0.999, "R^2 should be 1."
assert afreg.score(df, target) >= 0.999, "R^2 should be 1."
assert list(df.columns)[:3] == ["x000", "x001", "x002"], "Wrong column names"
def test_regular_df_X_y():
# autofeat with df without column names
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_steps=3)
df = afreg.fit_transform(pd.DataFrame(X), pd.DataFrame(target))
# score once with original, once with transformed data
assert afreg.score(pd.DataFrame(X), target) >= 0.999, "R^2 should be 1."
assert afreg.score(df, target) >= 0.999, "R^2 should be 1."
assert list(df.columns)[:3] == ["0", "1", "2"], "Wrong column names"
def test_weird_colnames():
# autofeat with df with weird column names
X, target = get_random_data()
afreg = AutoFeatRegressor(verbose=1, feateng_steps=3)
df = afreg.fit_transform(pd.DataFrame(X, columns=["x 1.1", 2, "x/3"]), pd.DataFrame(target))
assert afreg.score(pd.DataFrame(X, columns=["x 1.1", 2, "x/3"]), target) >= 0.999, "R^2 should be 1."
assert list(df.columns)[:3] == ["x 1.1", "2", "x/3"], "Wrong column names"
# error if the column names aren't the same as before
try:
afreg.score(pd.DataFrame(X, columns=["x 11", 2, "x/3"]), target)
except ValueError:
pass
else:
raise AssertionError("Should throw error on mismatch column names")
def test_nans():
# nans are ok in transform but not fit or predict (due to sklearn model)
X, target = get_random_data()
X[998, 0] = np.nan
X[999, 1] = np.nan
afreg = AutoFeatRegressor(verbose=1, feateng_steps=3)
try:
_ = afreg.fit_transform(pd.DataFrame(X, columns=["x 1.1", 2, "x/3"]), pd.DataFrame(target))
except ValueError:
pass
else:
raise AssertionError("fit with NaNs should throw an error")
_ = afreg.fit_transform(pd.DataFrame(X[:900], columns=["x 1.1", 2, "x/3"]), pd.DataFrame(target[:900]))
try:
_ = afreg.predict(pd.DataFrame(X[900:], columns=["x 1.1", 2, "x/3"]))
except ValueError:
pass
else:
raise AssertionError("predict with NaNs should throw an error")
df = afreg.transform(pd.DataFrame(X, columns=["x 1.1", 2, "x/3"]))
assert all([pd.isna(df.iloc[998, 0]), | pd.isna(df.iloc[999, 1]) | pandas.isna |
import pandas as pd
from unittest import TestCase
from .sar import Reservoirs
from .hidro import Stations
from .hidro.serie_temporal import SerieTemporal
from .hidro import EntityApi
class TestApi(TestCase):
def test_get_stations_by_city(self):
recife = Stations(name_city="RECIFE")
piranhas = Stations(name_city="PIRANHAS")
print(recife["39098600"])
print(piranhas["49330000"])
def test_get_data_from_ana_hydro_serie_temporal(self):
serie = SerieTemporal(code="01036007", type_data='2')
print(serie.data)
def test_get_data_from_ana_hydro_flow_height_none(self):
stations = Stations(code_start="49330000")
stations_xingo = stations
print(stations_xingo["49330000"])
def test_get_data_from_ana_hydro_flow_height(self):
stations = Stations(code_start="49330000")
stations_xingo = stations
print(stations_xingo["49330000"].series_temporal(type_data='3')) #tz='Etc/GMT-3'
print(stations_xingo["49330000"].series_temporal(type_data='1')) #tz='Etc/GMT-3'
def test_get_from_ana_hydro_rainfall(self):
stations = Stations(name_city="RECIFE")
stations_recife = stations
print(stations_recife["834003"].type_station)
print(stations_recife["834003"].series_temporal(type_data='2'))
def test_get_data_from_sar(self):
reservoir = Reservoirs()["19086"]
aflue = reservoir.series_temporal.affluence
deflu = reservoir.series_temporal.flow
afluencia = [192.00, -68301.00, 68795.00, 382.00, 489.00, 719.00]
defluencia = [78.00, 122.00, 95.00, 105.00, 173.00, 245.00]
data_flow = {"A19086": afluencia, "D19086": defluencia}
data = ["05/01/2002", "06/01/2002", "07/01/2002", "08/01/2002", "09/01/2002", "10/01/2002"]
data = | pd.to_datetime(data, dayfirst=True) | pandas.to_datetime |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import datetime
import glob
import networkx
import numpy
import os
import pandas
import pickle
import networkx_analysis
nodes_overlap_species_nm_fns = [ networkx_analysis.nm_name_equal,
networkx_analysis.nm_name_clean_equal,
networkx_analysis.nm_name_clean_approx,
#networkx_analysis.nm_gene_id_intersect,
#networkx_analysis.nm_name_clean_approx_OR_gene_id_intersect,
networkx_analysis.nm_bqbiol_is_equal,
networkx_analysis.nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_equal_w_participants,
networkx_analysis.nm_name_clean_equal_w_participants,
networkx_analysis.nm_name_clean_approx_w_participants,
#networkx_analysis.nm_gene_id_intersect_w_participants,
networkx_analysis.nm_bqbiol_is_equal_w_participants,
networkx_analysis.nm_bqbiol_is_overlaps_w_participants]
nodes_overlap_species_nm_fns_paper = [
networkx_analysis.nm_name_clean_equal,
networkx_analysis.nm_name_clean_approx,
networkx_analysis.nm_bqbiol_is_equal,
networkx_analysis.nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_equal_w_participants,
networkx_analysis.nm_name_clean_approx_w_participants,
networkx_analysis.nm_bqbiol_is_equal_w_participants,
networkx_analysis.nm_bqbiol_is_overlaps_w_participants]
nodes_overlap_species_nm_fns_paper_names = ["nmeq", "appeq", "enteq", "entov", "nmeq/wc", "appeq/wc", "enteq/wc", "entov/wc"]
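# Shorthand used in plots/tables (derived from the matcher function names above): nmeq = cleaned-name equal,
# appeq = approximate cleaned-name match, enteq = bqbiol:is annotation equal, entov = bqbiol:is annotation
# overlap; the "/wc" suffix marks the variants that also compare participants.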
nodes_overlap_reactions_nm_fns = [ networkx_analysis.nm_bqbiol_is_equal,
networkx_analysis.nm_bqbiol_is_overlaps,
networkx_analysis.nm_bqbiol_is_overlaps_sbo_is_a]
nodes_overlap_reactions_nm_fns_names = ["sboeq", "sboov", "sboisa"]
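# Reaction matcher shorthand (derived from the function names above): sboeq = SBO annotation equal,
# sboov = SBO annotation overlaps, sboisa = overlap allowing SBO is-a relations.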
subgraphs_overlap_node_match_fns = [networkx_analysis.nm_name_clean_AND_nm_bqbiol_is_equal,
networkx_analysis.nm_name_clean_AND_nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_AND_nm_bqbiol_is_overlaps_sbo_is_a,
networkx_analysis.nm_name_clean_approx_AND_nm_bqbiol_is_equal,
networkx_analysis.nm_name_clean_approx_AND_nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_approx_AND_nm_bqbiol_is_overlaps_sbo_is_a,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_equal,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps_sbo_is_a,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_equal,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_overlaps_sbo_is_a,
networkx_analysis.nm_name_clean_w_participants_AND_nm_bqbiol_is_equal,
networkx_analysis.nm_name_clean_w_participants_AND_nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a,
networkx_analysis.nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_equal,
networkx_analysis.nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_equal,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_equal,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_overlaps,
networkx_analysis.nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a]
subgraphs_overlap_node_match_fns_names = ["nmeq, sboeq", "nmeq, sboov", "nmeq, sboisa",
"appeq, sboeq", "appeq, sboov","appeq, sboisa",
"appeq/enteq, sboeq", "appeq/enteq, sboov","appeq/enteq, sboisa",
"appeq/entov, sboeq", "appeq/entov, sboov", "appeq/entov, sboisa",
"nmeq/wc, sboeq", "nmeq/wc, sboov", "nmeq/wc, sboisa",
"appeq/wc, sboeq", "appeq/wc, sboov", "appeq/wc, sboisa",
"appeq/enteq/wc, sboeq", "appeq/enteq/wc, sboov", "appeq/enteq/wc, sboisa",
"appeq/entov/wc, sboeq", "appeq/entov/wc, sboov", "appeq/entov/wc, sboisa"]
subgraphs_overlap_node_match_fns_names_map = { k.__name__: v for k, v in zip( subgraphs_overlap_node_match_fns, subgraphs_overlap_node_match_fns_names)}
########################################################################
########################################################################
# INITIALIZE
def mtor_dir():
directory = os.environ.get("MTOR_DATA_DIR")
if directory is None:
directory = ""
return directory + "/"
def mtor_dir_results():
return mtor_dir() + "/results/"
def mtor_dir_results_graphs():
return mtor_dir() + "/results-graphs/"
def mtor_dir_results_statistics():
return mtor_dir() + "/results-statistics/"
def now():
return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
########################################################################
########################################################################
# HELPERS
def plot_precision_recall_f_score( data, name, x_ticks = None, columns = ["precision", "recall","f-score"], kind = "line", legend_loc = 4, directory = mtor_dir_results_graphs()):
""" data - pandas data frame
kind - bar or lines
Plots precision, recall, f-score, exports to file """
if x_ticks is None:
x_ticks = data["name"]
import matplotlib
import matplotlib.pyplot
matplotlib.style.use('ggplot')
matplotlib.pyplot.figure();
data[columns].plot( kind = kind);
matplotlib.pyplot.legend(loc = legend_loc)
matplotlib.pyplot.xticks( range(len(data)), x_ticks, rotation = 30, ha ="right")
matplotlib.pyplot.ylim((0,100))
matplotlib.pyplot.savefig( "%s%s-%s.pdf" % (directory, name, kind))
def plot_comparison( datasets, dataset_names, name, x_ticks,
columns = ["precision", "recall", "f-score"], legend_loc = 4,
directory = mtor_dir_results_graphs()):
""" Plot data side by side"""
import matplotlib
import matplotlib.pyplot
matplotlib.style.use('ggplot')
for column in columns:
matplotlib.pyplot.figure();
for d in datasets:
matplotlib.pyplot.plot( d[column]);
matplotlib.pyplot.legend( dataset_names, loc = legend_loc)
matplotlib.pyplot.xticks( range(len(datasets[0])), x_ticks, rotation = 30, ha ="right")
matplotlib.pyplot.ylim((0,100))
matplotlib.pyplot.title( "%s %s" % (name, column))
matplotlib.pyplot.savefig( "%s%s-%s-%s.pdf" % (directory,name, column, "-".join(dataset_names)))
########################################################################
########################################################################
# INITIALIZE
def initialize_mTORpathway_target():
""" Initialize target.networkx.pickle """
networkx_analysis.load_pathway( "TARGET",
input_file = mtor_dir() + "mTORpathway-sbml.xml",
output_file = mtor_dir() + "TARGET.networkx.pickle")
def initialize_mTORpathway_source( source = "DEFAULT"):
""" initialize source NLP, ANN or others, None is default """
# initialize annotation
prefix = mtor_dir() + "events-" + source + "/"
suffix = ".sbml.xml"
files_target = glob.glob( prefix + "*" + suffix)
for f in files_target:
print( "Processing %s" % f)
id = f[len(prefix):-len(suffix)]
name = "%s-%s" % (source,id)
networkx_analysis.load_pathway( name,
input_file = f,
output_file = f + ".networkx.pickle",
prefix = id + "_")
print( "Combining graphs")
files = glob.glob( prefix + "*.networkx.pickle")
print( "Loading all graphs ...")
graphs = [pickle.load( open( f, "rb")) for f in files]
print( "Composing a single graph ...")
graph = networkx.Graph()
for g in graphs:
graph = networkx.compose( graph, g, name = source)
graph.name = source
output_file = "%s/%s.networkx.pickle" % (mtor_dir(), source)
print( "Exporting single graph to %s" % output_file)
pickle.dump( graph, open( output_file, "wb"))
########################################################################
########################################################################
# run analysis
def run_simple_stats( dataset = "TARGET"):
""" Export simple stats nodes, reaction numbers etc. """
f = "%s%s.networkx.pickle" % (mtor_dir(), dataset)
print( "%s: Processing %s from %s" % (now(), dataset,f))
graph = networkx.read_gpickle( f)
print( "%s:%s:%s: Filter isolated nodes" % (now(),dataset,graph))
graph_no_isolated_nodes = networkx_analysis.filter_graph_remove_isolated_nodes( graph)
for graph in [ graph, graph_no_isolated_nodes]:
export_file = "%s%s-simple-stats.pickle" % (mtor_dir_results_statistics(), graph.name)
networkx_analysis.run_analysis( graph, export_file)
def run_simple_stats_overlap( dataset_1 = "TARGET",
dataset_2 = "DEFAULT"):
print( "%s: Processing %s/%s" % (now(), dataset_1, dataset_2))
f1 = "%s%s.networkx.pickle" % (mtor_dir(), dataset_1)
f2 = "%s%s.networkx.pickle" % (mtor_dir(), dataset_2)
print( "Loading %s/%s" % (dataset_1, f1))
graph_1 = networkx.read_gpickle( f1)
print( "%s: Loading successful %s/%s/%s" % (now(), dataset_1, graph_1.name, f1))
print( "%s: Loading %s/%s" % (now(), dataset_2, f2))
graph_2 = networkx.read_gpickle( f2)
print( "%s: Loading successful %s/%s/%s" % (now(), dataset_2, graph_2.name, f2))
print( "%s: Computing %s-NO-ISOLATED-NODES" % (now(), dataset_2))
graph_2_no_isolated_nodes = networkx_analysis.filter_graph_remove_isolated_nodes( graph_2)
print( "%s: Finished computing %s-NO-ISOLATED-NODES/%s" % (now(), dataset_2, graph_2_no_isolated_nodes.name))
##################### SPECIES, REACTIONS, COMPARTMENTS OVERLAP
networkx_analysis.run_analysis_signatures( graph_1, graph_2,
export_file = "%s%s__%s-simple-stats-overlap.pickle" % (mtor_dir_results_statistics(), graph_1.name, graph_2.name))
networkx_analysis.run_analysis_signatures( graph_1, graph_2_no_isolated_nodes,
export_file = "%s%s__%s-simple-stats-overlap.pickle" % (mtor_dir_results_statistics(), graph_1.name, graph_2_no_isolated_nodes.name))
def run_node_overlap( node_match_fn_name,
dataset_1 = "TARGET",
dataset_2 = "ANN",
export_results = True,
export_results_prefix = mtor_dir_results() + "results-nodes-overlap-max",
compute_overlap_for_no_isolated_nodes = False,
ignore_existing = True):
print( "Processing %s/%s %s" % ( dataset_1, dataset_2, node_match_fn_name))
f1 = "%s%s.networkx.pickle" % (mtor_dir(), dataset_1)
print( "Loading %s at %s" % (dataset_1, f1))
graph_1 = networkx.read_gpickle( f1)
f2 = "%s%s.networkx.pickle" % (mtor_dir(), dataset_2)
print( "Loading %s at %s" % (dataset_2, f2))
graph_2 = networkx.read_gpickle( f2)
node_match_species = filter( lambda n: n.__name__ == node_match_fn_name, nodes_overlap_species_nm_fns)
node_match_reaction = filter( lambda n: n.__name__ == node_match_fn_name, nodes_overlap_reactions_nm_fns)
if compute_overlap_for_no_isolated_nodes:
print( "Computing %s-NO-ISOLATED-NODES" % graph_2.name)
graph_2_no_isolated_nodes = networkx_analysis.filter_graph_remove_isolated_nodes( graph_2)
if node_match_species:
node_match_species = node_match_species[0]
networkx_analysis.run_analysis_nodes_overlap_max( networkx_analysis.filter_species( graph_1),
networkx_analysis.filter_species( graph_2),
node_match = node_match_species,
export_results = export_results,
export_results_prefix = export_results_prefix,
ignore_existing = ignore_existing);
if compute_overlap_for_no_isolated_nodes:
networkx_analysis.run_analysis_nodes_overlap_max( networkx_analysis.filter_species( graph_1),
networkx_analysis.filter_species( graph_2_no_isolated_nodes),
node_match = node_match_species,
export_results = export_results,
export_results_prefix = export_results_prefix,
ignore_existing = ignore_existing);
if node_match_reaction:
node_match_reaction = node_match_reaction[0]
networkx_analysis.run_analysis_nodes_overlap_max( networkx_analysis.filter_reactions( graph_1),
networkx_analysis.filter_reactions( graph_2),
node_match = node_match_reaction,
export_results = export_results,
export_results_prefix = export_results_prefix,
ignore_existing = ignore_existing);
if compute_overlap_for_no_isolated_nodes:
networkx_analysis.run_analysis_nodes_overlap_max( networkx_analysis.filter_reactions( graph_1),
networkx_analysis.filter_reactions( graph_2_no_isolated_nodes),
node_match = node_match_reaction,
export_results = export_results,
export_results_prefix = export_results_prefix,
ignore_existing = ignore_existing);
print( "Finished processing %s/%s %s" % ( dataset_1, dataset_2, node_match_fn_name))
def run_subgraphs_overlap( node_match_fn_name,
dataset_1 = "TARGET",
dataset_2 = "ANN",
export_results = True,
compute_overlap_for_with_isolated_nodes = False,
ignore_existing = True):
print( "Processing %s/%s" % ( dataset_1, dataset_2))
f1 = "%s%s.networkx.pickle" % (mtor_dir(), dataset_1)
print( "Loading %s at %s" % (dataset_1, f1))
graph_1 = networkx.read_gpickle( f1)
f2 = "%s%s.networkx.pickle" % (mtor_dir(), dataset_2)
print( "Loading %s at %s" % (dataset_2, f2))
graph_2 = networkx.read_gpickle( f2)
print( "Computing %s-NO-ISOLATED-NODES" % graph_2.name)
graph_2_no_isolated_nodes = networkx_analysis.filter_graph_remove_isolated_nodes( graph_2)
node_match = filter( lambda n: n.__name__ == node_match_fn_name, subgraphs_overlap_node_match_fns)[0]
#### SUBGRAPH OVERLAP
if compute_overlap_for_with_isolated_nodes:
networkx_analysis.run_analysis_subgraphs_overlap( graph_1, graph_2,
node_match = node_match,
export_results = export_results,
export_results_prefix = mtor_dir_results() + "results-subgraphs-overlap-max",
ignore_existing = ignore_existing)
networkx_analysis.run_analysis_subgraphs_overlap( graph_1, graph_2_no_isolated_nodes,
node_match = node_match,
export_results = export_results,
export_results_prefix = mtor_dir_results() + "results-subgraphs-overlap-max",
ignore_existing = ignore_existing)
print( "Finished processing %s/%s %s" % ( dataset_1, dataset_2, node_match_fn_name))
########################################################################
########################################################################
# node match plotting and printing
def print_AND_export_node_match_results( graph_1_name = "TARGET-SPECIES",
graph_2_name = "ANN-SPECIES",
node_match_names = ["nm_name_clean_approx", "nm_gene_id_intersect", "nm_bqbiol_is_equal", "nm_bqbiol_is_overlaps"]):
import pickle
for node_match_name in node_match_names:
file_name = "results-nodes-overlap-max__%s__%s__%s.pickle" % (graph_1_name, graph_2_name, node_match_name)
[graph_1, graph_2, matches] = pickle.load( open(file_name, "rb"))
networkx_analysis.print_node_match_result( graph_1, graph_2, matches, node_match_name = node_match_name,
export_matches = file_name + ".txt")
# print_AND_export_node_match_results()
# print_AND_export_node_match_results( graph_1_name = "TARGET-SPECIES", graph_2_name = "NLP-SPECIES", node_match_names = ["nm_bqbiol_is_equal", "nm_bqbiol_is_overlaps"])
def node_match_results_to_pandas_dataframe( sources = ["ANN", "DEFAULT", "DEFAULT+GE11+MTOR"]):
""" Exports species and reactions node match results as pandas dataframe """
print("Loading reaction node match data")
for source in sources:
node_match_reaction_data = []
for nm in nodes_overlap_reactions_nm_fns:
graph_1_name = "TARGET-REACTIONS"
graph_2_name = "%s-REACTIONS" % source
f = "%s__%s__%s__%s.pickle" % (mtor_dir_results() + "results-nodes-overlap-max", graph_1_name, graph_2_name, nm.__name__)
print( "Loading %s" % f)
[graph_1, graph_2, matches] = pickle.load( open( f, "rb"))
precision, recall, f_score = networkx_analysis.get_nodes_overlap_max_result_precision_recall_f_score( graph_1, graph_2, matches)
node_match_reaction_data.append( {"target" : graph_1_name, "source" : graph_2_name, "name" : nm.__name__ , "precision" : precision, "recall" : recall, "f-score" : f_score})
node_match_reaction_data = | pandas.DataFrame(node_match_reaction_data) | pandas.DataFrame |
from dataclasses import dataclass
import numpy as np
import pandas as pd
from os import path
from typing import Tuple
from osa import factory
from handlers.result import BaseResult
from handlers.file import get_setting, get_selected_file_path
from handlers.cache import load_only_array_results
@dataclass
class SaveData:
"""
    Saves the data from the cache to the last file created in the saving_folder. Run "get_data" before executing.
"""
command: str
result: BaseResult
def do_work(self) -> BaseResult:
array_results: Tuple[np.array, np.array] = load_only_array_results()
if not array_results:
self._fail_result_no_data()
return self.result
try:
file_path = get_selected_file_path()
except FileNotFoundError:
self._fail_result_no_dir()
return self.result
if not file_path:
self._fail_result_no_file()
elif self._file_is_empty(file_path):
self._save_data(file_path, array_results)
self._success_result()
else:
self._append_and_save_data(file_path, array_results)
return self.result
def _save_data(self, file_path: str, array_results: tuple) -> None:
wavelength, trace = array_results
if self._is_points_data(wavelength):
self._save_as_points_data(file_path, wavelength, trace)
else:
self._save_as_trace_data(file_path, wavelength, trace)
def _append_and_save_data(self, file_path: str, array_result: tuple) -> None:
wavelength, trace = array_result
df = pd.read_csv(file_path, index_col=0)
if self._is_points_data(trace) and self._selected_file_is_points_data(file_path):
self._append_and_save_as_points_data(file_path, wavelength, trace)
self._success_result()
elif self._length_matches(df, trace) and not self._selected_file_is_points_data(file_path):
self._append_and_save_as_trace_data(df, file_path, trace)
self._success_result()
else:
self._fail_result_no_match(len(df), len(trace))
def _save_as_points_data(self, file_path, wavelength, trace):
df = self._create_points_df(wavelength, trace)
df.to_csv(file_path)
def _save_as_trace_data(self, file_path, wavelength, trace):
column_name = self._ask_column_name()
df = pd.DataFrame(data=trace, index=wavelength, columns=[column_name])
df.index.name = "wavelength [nm]"
df.to_csv(file_path)
def _append_and_save_as_points_data(self, file_path, wavelength, trace):
df = pd.read_csv(file_path, index_col=[0, 1])
new_df = self._create_points_df(wavelength, trace)
df = df.append(new_df)
df.to_csv(file_path)
def _append_and_save_as_trace_data(self, df: pd.DataFrame,
file_path: str,
trace: np.array):
column_name = self._ask_column_name()
df[column_name] = trace
df.to_csv(file_path)
def _create_points_df(self, wavelength, trace, ):
column_name = self._ask_column_name()
df = | pd.DataFrame(data=trace, columns=["trace [dBm]"]) | pandas.DataFrame |
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from pandas.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
def PCA(self):
for i in range(self.lenlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
def sampleSize(self):
r = 0.3
alpha = 0.05
# power=0.9
C = 0.5 * np.log((1 + r) / (1 - r))
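        # C is Fisher's z-transform of the target correlation r; together with the normal quantiles
        # Za/Zb below it gives the standard sample-size formula N = ((Za - Zb) / C)^2 + 3.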
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.append(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.append(N)
return [powerArray, sizeArray]
def normaliza(self, X):
        correction = np.sqrt((len(X) - 1) / len(X)) # std factor correction
mean_ = np.mean(X, 0)
scale_ = np.std(X, 0)
X = X - mean_
X = X / (scale_ * correction)
return X
def gof(self):
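        # Goodness-of-fit (GoF): square root of (block-size-weighted mean AVE) x (mean R^2 of endogenous LVs).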
r2mean = np.mean(self.r2.T[self.endoexo()[0]].values)
AVEmean = self.AVE().copy()
totalblock = 0
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = len(block.columns.values)
totalblock += block
AVEmean[self.latent[i]] = AVEmean[self.latent[i]] * block
AVEmean = np.sum(AVEmean) / totalblock
return np.sqrt(AVEmean * r2mean)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
return endoVar, exoVar
def residuals(self):
exoVar = []
endoVar = []
outer_residuals = self.data.copy()
# comun_ = self.data.copy()
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(len(outer_), 1)
loadings = loadings.reshape(len(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = pd.DataFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = pd.concat([outer_residuals, inner_residuals], axis=1)
mean_ = np.mean(self.data, 0)
# comun_ = comun_.apply(lambda row: row + mean_, axis=1)
sumOuterResid = pd.DataFrame.sum(
pd.DataFrame.sum(outer_residuals**2))
sumInnerResid = pd.DataFrame.sum(
pd.DataFrame.sum(inner_residuals**2))
divFun = sumOuterResid + sumInnerResid
return residuals, outer_residuals, inner_residuals, divFun
def srmr(self):
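        # SRMR: standardised root mean square residual between the empirical and
        # model-implied correlation matrices.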
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).mean())
return srmr
def implied(self):
corLVs = pd.DataFrame.cov(self.fscores)
implied_ = pd.DataFrame.dot(self.outer_loadings, corLVs)
implied = pd.DataFrame.dot(implied_, self.outer_loadings.T)
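        # Fix the diagonal of the model-implied matrix to 1 (unit variances).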
implied.values[[np.arange(len(self.manifests))] * 2] = 1
return implied
def empirical(self):
empirical = self.data_
return pd.DataFrame.corr(empirical)
def frequency(self, data=None, manifests=None):
if data is None:
data = self.data
if manifests is None:
manifests = self.manifests
frequencia = pd.DataFrame(0, index=range(1, 6), columns=manifests)
for i in range(len(manifests)):
frequencia[manifests[i]] = data[
manifests[i]].value_counts()
frequencia = frequencia / len(data) * 100
frequencia = frequencia.reindex_axis(
sorted(frequencia.columns), axis=1)
frequencia = frequencia.fillna(0).T
frequencia = frequencia[(frequencia.T != 0).any()]
maximo = pd.DataFrame.max(pd.DataFrame.max(data, axis=0))
if int(maximo) & 1:
neg = np.sum(frequencia.ix[:, 1: ((maximo - 1) / 2)], axis=1)
ind = frequencia.ix[:, ((maximo + 1) / 2)]
pos = np.sum(
frequencia.ix[:, (((maximo + 1) / 2) + 1):maximo], axis=1)
else:
neg = np.sum(frequencia.ix[:, 1:((maximo) / 2)], axis=1)
ind = 0
pos = np.sum(frequencia.ix[:, (((maximo) / 2) + 1):maximo], axis=1)
frequencia['Neg.'] = pd.Series(
neg, index=frequencia.index)
frequencia['Ind.'] = pd.Series(
ind, index=frequencia.index)
frequencia['Pos.'] = pd.Series(
pos, index=frequencia.index)
return frequencia
def frequencyPlot(self, data_, SEM=None):
segmento = 'SEM'
SEMmax = pd.DataFrame.max(SEM)
ok = None
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = pd.concat([block, SEM], axis=1)
for j in range(SEMmax + 1):
dataSEM = (block.loc[data_[segmento] == j]
).drop(segmento, axis=1)
block_val = dataSEM.columns.values
dataSEM = self.frequency(dataSEM, block_val)['Pos.']
dataSEM = dataSEM.rename(j + 1)
ok = dataSEM if ok is None else pd.concat(
[ok, dataSEM], axis=1)
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
plotando = ok.ix[block_val].dropna(axis=1)
plotando.plot.bar()
plt.legend(loc='upper center',
bbox_to_anchor=(0.5, -.08), ncol=6)
plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
# plt.show()
# block.plot.bar()
# plt.show()
'''for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
block = self.frequency(block, block_val)
block.plot.bar()
plt.show()'''
def dataInfo(self):
sd_ = np.std(self.data, 0)
mean_ = np.mean(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(len(self.data.columns))]
return [mean_, sd_, skew, kurtosis, w]
def predict(self, method='redundancy'):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
if (method == 'exogenous'):
Beta = self.path_matrix.ix[endoVar][endoVar]
Gamma = self.path_matrix.ix[endoVar][exoVar]
beta = [1 if (self.latent[i] in exoVar)
else 0 for i in range(self.lenlatent)]
beta = np.diag(beta)
beta_ = [1 for i in range(len(Beta))]
beta_ = np.diag(beta_)
beta = pd.DataFrame(beta, index=self.latent, columns=self.latent)
mid = pd.DataFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
mid = (mid.T.values).flatten('F')
k = 0
for j in range(len(exoVar)):
for i in range(len(endoVar)):
beta.ix[endoVar[i], exoVar[j]] = mid[k]
k += 1
elif (method == 'redundancy'):
beta = self.path_matrix.copy()
beta_ = pd.DataFrame(1, index=np.arange(
len(exoVar)), columns=np.arange(len(exoVar)))
beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
elif (method == 'communality'):
beta = np.diag(np.ones(len(self.path_matrix)))
beta = pd.DataFrame(beta)
partial_ = pd.DataFrame.dot(self.outer_weights, beta.T.values)
prediction = pd.DataFrame.dot(partial_, self.outer_loadings.T.values)
predicted = pd.DataFrame.dot(self.data, prediction)
predicted.columns = self.manifests
mean_ = np.mean(self.data, 0)
intercept = mean_ - np.dot(mean_, prediction)
predictedData = predicted.apply(lambda row: row + intercept, axis=1)
return predictedData
def cr(self):
# Composite Reliability
composite = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
cor_mat = np.cov(block.T)
evals, evecs = np.linalg.eig(cor_mat)
U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
loadings = V[0, :] * np.sqrt(evals[0])
numerador = np.sum(abs(loadings))**2
denominador = numerador + (p - np.sum(loadings ** 2))
cr = numerador / denominador
composite[self.latent[i]] = cr
else:
composite[self.latent[i]] = 1
composite = composite.T
return(composite)
def r2adjusted(self):
n = len(self.data_)
r2 = self.r2.values
r2adjusted = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
p = sum(self.LVariables['target'] == self.latent[i])
r2adjusted[self.latent[i]] = r2[i] - \
(p * (1 - r2[i])) / (n - p - 1)
return r2adjusted.T
def htmt(self):
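        # HTMT: heterotrait-monotrait ratio of correlations, a discriminant-validity criterion
        # (mean between-block correlations divided by the geometric mean of within-block correlations).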
htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
index=self.manifests, columns=self.manifests)
mean = []
allBlocks = []
for i in range(self.lenlatent):
block_ = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
allBlocks.append(list(block_.values))
block = htmt_.ix[block_, block_]
mean_ = (block - np.diag(np.diag(block))).values
mean_[mean_ == 0] = np.nan
mean.append(np.nanmean(mean_))
comb = [[k, j] for k in range(self.lenlatent)
for j in range(self.lenlatent)]
comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
for i in range(self.lenlatent ** 2)]
comb__ = []
for i in range(self.lenlatent ** 2):
block = (htmt_.ix[allBlocks[comb[i][1]],
allBlocks[comb[i][0]]]).values
# block[block == 1] = np.nan
comb__.append(np.nanmean(block))
htmt__ = np.divide(comb__, comb_)
where_are_NaNs = np.isnan(htmt__)
htmt__[where_are_NaNs] = 0
htmt = pd.DataFrame(np.tril(htmt__.reshape(
(self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)
return htmt
def comunalidades(self):
        # Communalities
return self.outer_loadings**2
def AVE(self):
# AVE
return self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
def fornell(self):
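        # Fornell-Larcker criterion: squared latent-variable correlations with each construct's AVE
        # placed on the diagonal for comparison.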
cor_ = pd.DataFrame.corr(self.fscores)**2
AVE = self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
for i in range(len(cor_)):
cor_.ix[i, i] = AVE[i]
return(cor_)
def rhoA(self):
# rhoA
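        # Dijkstra-Henseler's rho_A reliability estimate, computed per latent variable from the
        # outer weights and the observed covariance block.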
rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = pd.DataFrame.dot(S.T, S) / S.shape[0]
numerador = (
np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
denominador = (
(np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
rhoA_ = ((result)**2) * (numerador / denominador)
if(np.isnan(rhoA_.values)):
rhoA[self.latent[i]] = 1
else:
rhoA[self.latent[i]] = rhoA_.values
return rhoA.T
def xloads(self):
# Xloadings
A = self.data_.transpose().values
B = self.fscores.transpose().values
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
xloads_ = (np.dot(A_mA, B_mB.T) /
np.sqrt(np.dot(ssA[:, None], ssB[None])))
xloads = pd.DataFrame(
xloads_, index=self.manifests, columns=self.latent)
return xloads
def corLVs(self):
# Correlations LVs
corLVs_ = np.tril(pd.DataFrame.corr(self.fscores))
return pd.DataFrame(corLVs_, index=self.latent, columns=self.latent)
def alpha(self):
# Cronbach Alpha
alpha = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
p_ = len(block)
correction = np.sqrt((p_ - 1) / p_)
soma = np.var(np.sum(block, axis=1))
cor_ = pd.DataFrame.corr(block)
denominador = soma * correction**2
numerador = 2 * np.sum(np.tril(cor_) - np.diag(np.diag(cor_)))
alpha_ = (numerador / denominador) * (p / (p - 1))
alpha[self.latent[i]] = alpha_
else:
alpha[self.latent[i]] = 1
return alpha.T
def vif(self):
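        # Variance inflation factor per manifest variable: regress it on all other manifests
        # and report 1 / (1 - R^2).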
vif = []
totalmanifests = range(len(self.data_.columns))
for i in range(len(totalmanifests)):
independent = [x for j, x in enumerate(totalmanifests) if j != i]
coef, resid = np.linalg.lstsq(
self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
r2 = 1 - resid / \
(self.data_.ix[:, i].size * self.data_.ix[:, i].var())
vif.append(1 / (1 - r2))
vif = pd.DataFrame(vif, index=self.manifests)
return vif
def PLSc(self):
##################################################
# PLSc
rA = self.rhoA()
corFalse = self.corLVs()
for i in range(self.lenlatent):
for j in range(self.lenlatent):
if i == j:
corFalse.ix[i][j] = 1
else:
corFalse.ix[i][j] = corFalse.ix[i][
j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
corTrue = np.zeros([self.lenlatent, self.lenlatent])
for i in range(self.lenlatent):
for j in range(self.lenlatent):
corTrue[j][i] = corFalse.ix[i][j]
corTrue[i][j] = corFalse.ix[i][j]
corTrue = pd.DataFrame(corTrue, corFalse.columns, corFalse.index)
# Loadings
attenuedOuter_loadings = pd.DataFrame(
0, index=self.manifests, columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
newLoad = (
weights.values * np.sqrt(rA.ix[self.latent[i]].values)) / (result.values)
myindex = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
myindex_ = self.latent[i]
attenuedOuter_loadings.ix[myindex.values, myindex_] = newLoad
# Path
dependent = np.unique(self.LVariables.ix[:, 'target'])
for i in range(len(dependent)):
independent = self.LVariables[self.LVariables.ix[
:, "target"] == dependent[i]]["source"]
dependent_ = corTrue.ix[dependent[i], independent]
independent_ = corTrue.ix[independent, independent]
# path = np.dot(np.linalg.inv(independent_),dependent_)
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
self.path_matrix.ix[dependent[i], independent] = coef
return attenuedOuter_loadings
# End PLSc
##################################################
def __init__(self, dados, LVcsv, Mcsv, scheme='path', regression='ols', h=0, maximo=300,
stopCrit=7, HOC='false', disattenuate='false', method='lohmoller'):
self.data = dados
self.LVcsv = LVcsv
self.Mcsv = Mcsv
self.maximo = maximo
self.stopCriterion = stopCrit
self.h = h
self.scheme = scheme
self.regression = regression
self.disattenuate = disattenuate
contador = 0
self.convergiu = 0
data = dados if type(
dados) is pd.core.frame.DataFrame else pd.read_csv(dados)
LVariables = pd.read_csv(LVcsv)
Variables = Mcsv if type(
Mcsv) is pd.core.frame.DataFrame else | pd.read_csv(Mcsv) | pandas.read_csv |
# Encoding: utf-8
""" SpineSession class
SpineSession is used to load metadata information created using the MATLAB SpineImaging class
It has load and save capabilities
"""
import copy
import io
import json
import os
import sys
import pickle
import logging
from builtins import input
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyspark
import thunder as td
import requests
from .IO import loadmat
from .Session import Session
from .Utils import searchTiffInfo
logger = logging.getLogger(__name__)
logger.handlers = []
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter("%(name)s @ %(asctime)s - [%(levelname)s] %(module)s::%(funcName)s: %(message)s"))
ch.setLevel(logging.INFO)
logger.addHandler(ch)
class SpineSession(Session):
"""Session object used to analyze spine imaging data
"""
def __init__(self, basePath='/groups/svoboda/svobodalab/users/Aaron', animalID=None, date=None, run=None,
verbose=True, nPlanes=0):
""" Initialize a new SpineSession object.
Assumes a format of BASE\YYMMDD_animalID\RUN
If no input is given, returns an empty object
:param basePath: where the base folder is in
:param animalID: such as WR34
:param date: as YYMMDD format
:param run: name of run
:param verbose: Use logger
:param nPlanes number of planes
"""
super(SpineSession, self).__init__(basePath, animalID, date, run, nPlanes=nPlanes, verbose=verbose)
if animalID and date and run:
logger.info('SpineSession initialized, path = ' + self.path)
def __repr__(self):
if self.animalID and self.date and self.run:
return 'SpineSession: animal: %s, date: %s, run: %s' % (self.animalID, self.date, self.run)
else:
return 'SpineSession object'
def display(self):
""" method to print session info """
super(SpineSession, self).display()
if hasattr(self, 'ZError'):
Last = self.TrueFieldsCenterSamp[-1]
Errors = self.ZError[self.TrueFieldsCenterSamp]
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(self.ZError[0:Last + 10])
plt.plot(self.TrueFieldsCenterSamp, Errors, 'o')
plt.legend(['Raw error', 'Scan fields'])
plt.ylabel('Error (um)')
plt.xlabel('Lines')
plt.title('Z errors')
self.ZErrorFig = fig
def initBase(self, sc=None, xScale=1.13, yScale=1.13, nPartitions=None, xScaleAnat=1.0, yScaleAnat=1.0,
baseFlyLines=4, pixSizeZ=2500, flyVelocity=266):
""" initialize properties of the session
:param sc: SparkContext
:param xScale: expansion factor for pixel in X
        :param yScale: expansion factor for pixel in Y
:param nPartitions: number of partitions to load the data, if None will take sc.defaultParallelism
:param flyVelocity: X, Y galvos speed in um/ms
:param xScaleAnat: scaling factor for stack in X
:param yScaleAnat: scaling factor for stack in Y
:param baseFlyLines: number of fly lines to remove in any case
:param pixSizeZ: PSF FWHM in Z in nm
"""
super(SpineSession, self).initBase(sc, nPartitions, flyVelocity)
self.xScale = xScale
self.yScale = yScale
self.xScaleAnat = xScaleAnat
self.yScaleAnat = yScaleAnat
self.pixSizeZ = pixSizeZ
self.baseFlyLines = baseFlyLines
self.getSpMat()
if hasattr(self, 'xSize'):
self.pixSizeXY = 1.0 / (self.xSize / self.xSizeOrig) * xScale * 1000 # in nm!!!
self.setFlyLines(self.baseFlyLines)
else:
self.pixSizeXY = None
self.getMeta(from_web=True)
def getMeta(self, filename='/Database/Sessions.csv', from_web=False,
key='<KEY>', gid='78168970'):
"""
:param from_web: if true will go to public google sheet, if false will go to session.basePath + filename
:param filename: local file to look for
:param key: google sheet key
:param gid: google sheet gid
:return: metadata from the session
"""
if from_web:
response = requests.get('https://docs.google.com/spreadsheet/ccc?key=' + key + '&output=csv&gid=' + gid)
assert response.status_code == 200, 'Wrong status code'
f = io.StringIO(response.content.decode('utf-8'))
sessionsDF = | pd.read_csv(f) | pandas.read_csv |
# %% [markdown]
# # 📃 Solution for Exercise 02
#
# This notebook aims at building baseline classifiers, which we'll use to
# compare our predictive model. Besides, we will check the differences with
# the baselines that we saw in regression.
#
# We will use the adult census dataset, using only the numerical features.
# %%
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census-numeric-all.csv")
data, target = adult_census.drop(columns="class"), adult_census["class"]
# %% [markdown]
# First, define a `ShuffleSplit` cross-validation strategy taking half of the
# sample as a testing set at each round.
# %%
from sklearn.model_selection import ShuffleSplit
cv = ShuffleSplit(n_splits=10, test_size=0.5, random_state=0)
# %% [markdown]
# Next, create a machine learning pipeline composed of a transformer to
# standardize the data followed by a logistic regression.
# %%
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
classifier = make_pipeline(StandardScaler(), LogisticRegression())
# %% [markdown]
# Get the test score by using the model, the data, and the cross-validation
# strategy that you defined above.
# %%
from sklearn.model_selection import cross_validate
result_classifier = cross_validate(classifier, data, target, cv=cv, n_jobs=-1)
test_score_classifier = pd.Series(
result_classifier["test_score"], name="Classifier score")
# %% [markdown]
# Using the `sklearn.model_selection.permutation_test_score` function,
# check the chance level of the previous model.
# %%
from sklearn.model_selection import permutation_test_score
score, permutation_score, pvalue = permutation_test_score(
classifier, data, target, cv=cv, n_jobs=-1, n_permutations=10)
test_score_permutation = pd.Series(permutation_score, name="Permuted score")
# %% [markdown]
# Finally, compute the test score of a dummy classifier which would predict
# the most frequent class from the training set. You can look at the
# `sklearn.dummy.DummyClassifier` class.
# %%
from sklearn.dummy import DummyClassifier
dummy = DummyClassifier(strategy="most_frequent")
result_dummy = cross_validate(dummy, data, target, cv=cv, n_jobs=-1)
test_score_dummy = | pd.Series(result_dummy["test_score"], name="Dummy score") | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # Polifact_Analysis
#
# ### @Author : <NAME>
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
import plotly.express as px
from scipy import signal
import warnings
warnings.filterwarnings("ignore")
# to make the shell more interactive
from IPython.display import display
from IPython.display import Image
# setting up the chart size and background
plt.rcParams['figure.figsize'] = (16, 8)
plt.style.use('fivethirtyeight')
# In[2]:
pwd
# In[3]:
path ='E:\\DataScience\\MachineLearning\\Polotifact_Data'
# In[4]:
import os
from glob import glob
os.listdir(path)
# In[5]:
df = pd.read_csv(path+"\\politifact.csv")
df.head(5)
# In[6]:
| pd.set_option('display.max_colwidth', 200) | pandas.set_option |
# Parameters
XGB_WEIGHT = 0.6200
BASELINE_WEIGHT = 0.0200
OLS_WEIGHT = 0.0700
NN_WEIGHT = 0.0600
XGB1_WEIGHT = 0.8000 # Weight of first in combination of two XGB models
BASELINE_PRED = 0.0115 # Baseline based on mean of training data, per Oleg
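# Note: the weights above do not sum to 1; the remaining share presumably goes to the
# LightGBM model trained later in this script (lightgbm is imported below).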
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import gc
from sklearn.linear_model import LinearRegression
import random
import datetime as dt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout, BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Imputer
##### READ IN RAW DATA
print( "\nReading data from disk ...")
prop = pd.read_csv('../input/properties_2016.csv')
train = | pd.read_csv("../input/train_2016_v2.csv") | pandas.read_csv |