prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
from PyQt5.QtWidgets import QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QAbstractItemView, QGroupBox, \
QMessageBox, QDesktopWidget, QListWidget, QListWidgetItem, QPushButton, QHeaderView
from PyQt5.QtCore import pyqtSignal
from .custom_widgets import QTableWidgetPandasDF
import pandas as pd
class ILTISTransferDialog(QMainWindow):
send_data_signal = pyqtSignal(list, list, pd.DataFrame, name="send data")
def __init__(self, data_loaded_df, metadata_to_choose_from):
super().__init__()
self.data_loaded_df = data_loaded_df
centralWidget = QWidget(self)
main_vbox = QVBoxLayout(centralWidget)
self.table = QTableWidgetPandasDF(self)
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.setSelectionMode(QAbstractItemView.MultiSelection)
self.table.refresh(data_loaded_df)
main_vbox.addWidget(self.table)
metadata_choice_box = QGroupBox("Select one or more metadata that will be used to "
"construct the dataset name in iltis")
metadata_choice_vboxlayout = QVBoxLayout(metadata_choice_box)
self.metadata_choice_list = QTableWidgetPandasDF(self)
self.metadata_choice_list.setSelectionBehavior(QAbstractItemView.SelectRows)
self.metadata_choice_list.setSelectionMode(QAbstractItemView.MultiSelection)
self.metadata_choice_list.refresh(
|
pd.DataFrame.from_dict({"Metadata to choose from": metadata_to_choose_from})
|
pandas.DataFrame.from_dict
|
import numpy as np
from statsmodels.genmod.bayes_mixed_glm import (BinomialBayesMixedGLM,
PoissonBayesMixedGLM)
import pandas as pd
from scipy import sparse
from numpy.testing import assert_allclose, assert_equal
from scipy.optimize import approx_fprime
def gen_simple_logit(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[1, -1]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(nc, dtype=int)
return y, exog_fe, exog_vc, ident
def gen_simple_poisson(nc, cs, s):
np.random.seed(3799)
exog_vc = np.kron(np.eye(nc), np.ones((cs, 1)))
exog_fe = np.random.normal(size=(nc * cs, 2))
vc = s * np.random.normal(size=nc)
lp = np.dot(exog_fe, np.r_[0.1, -0.1]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident = np.zeros(nc, dtype=int)
return y, exog_fe, exog_vc, ident
def gen_crossed_logit(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_poisson(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.eye(nc), np.ones((cs, 1)))
b = np.kron(np.ones((cs, 1)), np.eye(nc))
exog_vc = np.concatenate((a, b), axis=1)
exog_fe = np.random.normal(size=(nc * cs, 1))
vc = s1 * np.random.normal(size=2 * nc)
vc[nc:] *= s2 / s1
lp = np.dot(exog_fe, np.r_[-0.5]) + np.dot(exog_vc, vc)
r = np.exp(lp)
y = np.random.poisson(r)
ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
return y, exog_fe, exog_vc, ident
def gen_crossed_logit_pandas(nc, cs, s1, s2):
np.random.seed(3799)
a = np.kron(np.arange(nc), np.ones(cs))
b = np.kron(np.ones(cs), np.arange(nc))
fe = np.ones(nc * cs)
vc = np.zeros(nc * cs)
for i in np.unique(a):
ii = np.flatnonzero(a == i)
vc[ii] += s1 * np.random.normal()
for i in np.unique(b):
ii = np.flatnonzero(b == i)
vc[ii] += s2 * np.random.normal()
lp = -0.5 * fe + vc
pr = 1 / (1 + np.exp(-lp))
y = 1 * (np.random.uniform(size=nc * cs) < pr)
ident = np.zeros(2 * nc, dtype=int)
ident[nc:] = 1
df = pd.DataFrame({"fe": fe, "a": a, "b": b, "y": y})
return df
def test_simple_logit_map():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-3)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt.predict(linear=linear, exog=exog)
pr2 = glmm.predict(rslt.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr1.max() <= 1, True)
def test_simple_poisson_map():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 0.2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
assert_allclose(
glmm1.logposterior_grad(rslt1.params),
np.zeros_like(rslt1.params),
atol=1e-3)
# This should give the same answer as above
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_map()
assert_allclose(rslt1.params, rslt2.params, atol=1e-4)
# Test the predict method
for linear in False, True:
for exog in None, exog_fe:
pr1 = rslt1.predict(linear=linear, exog=exog)
pr2 = rslt2.predict(linear=linear, exog=exog)
pr3 = glmm1.predict(rslt1.params, linear=linear, exog=exog)
pr4 = glmm2.predict(rslt2.params, linear=linear, exog=exog)
assert_allclose(pr1, pr2, rtol=1e-5)
assert_allclose(pr2, pr3, rtol=1e-5)
assert_allclose(pr3, pr4, rtol=1e-5)
if not linear:
assert_equal(pr1.min() >= 0, True)
assert_equal(pr2.min() >= 0, True)
assert_equal(pr3.min() >= 0, True)
# Check dimensions and PSD status of cov_params
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_logit_map():
y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_crossed_poisson_map():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
# Check dimensions and PSD status of cov_params
cp = rslt.cov_params()
p = len(rslt.params)
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
def test_logit_map_crossed_formula():
data = gen_crossed_logit_pandas(10, 10, 1, 0.5)
fml = "y ~ fe"
fml_vc = {"a": "0 + C(a)", "b": "0 + C(b)"}
glmm = BinomialBayesMixedGLM.from_formula(fml, fml_vc, data, vcp_p=0.5)
rslt = glmm.fit_map()
assert_allclose(
glmm.logposterior_grad(rslt.params),
np.zeros_like(rslt.params),
atol=1e-4)
rslt.summary()
r = rslt.random_effects("a")
assert_allclose(
r.iloc[0, :].values, np.r_[-0.02004904, 0.094014], atol=1e-4)
# Check dimensions and PSD status of cov_params
cm = rslt.cov_params()
p = rslt.params.shape[0]
assert_equal(list(cm.shape), [p, p])
np.linalg.cholesky(cm)
def test_elbo_grad():
for f in range(2):
for j in range(2):
if f == 0:
if j == 0:
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
else:
y, exog_fe, exog_vc, ident = gen_crossed_logit(
10, 10, 1, 2)
elif f == 1:
if j == 0:
y, exog_fe, exog_vc, ident = gen_simple_poisson(
10, 10, 0.5)
else:
y, exog_fe, exog_vc, ident = gen_crossed_poisson(
10, 10, 1, 0.5)
exog_vc = sparse.csr_matrix(exog_vc)
if f == 0:
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5)
else:
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
for k in range(3):
if k == 0:
vb_mean = rslt1.params
vb_sd = np.ones_like(vb_mean)
elif k == 1:
vb_mean = np.zeros(len(vb_mean))
vb_sd = np.ones_like(vb_mean)
else:
vb_mean = np.random.normal(size=len(vb_mean))
vb_sd = np.random.uniform(1, 2, size=len(vb_mean))
mean_grad, sd_grad = glmm1.vb_elbo_grad(vb_mean, vb_sd)
def elbo(vec):
n = len(vec) // 2
return glmm1.vb_elbo(vec[:n], vec[n:])
x = np.concatenate((vb_mean, vb_sd))
g1 = approx_fprime(x, elbo, 1e-5)
n = len(x) // 2
mean_grad_n = g1[:n]
sd_grad_n = g1[n:]
assert_allclose(mean_grad, mean_grad_n, atol=1e-2, rtol=1e-2)
assert_allclose(sd_grad, sd_grad_n, atol=1e-2, rtol=1e-2)
def test_simple_logit_vb():
y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 0)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[0.75330405, -0.71643228, -2.49091288, -0.00959806, 0.00450254],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[0.79338836, -0.7599833, -0.64149356, -0.24772884, 0.10775366],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_simple_poisson_vb():
y, exog_fe, exog_vc, ident = gen_simple_poisson(10, 10, 1)
exog_vc = sparse.csr_matrix(exog_vc)
glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident, vcp_p=0.5)
rslt2 = glmm2.fit_vb(rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-0.07233493, -0.06706505, -0.47159649, 1.12575122, -1.02442201],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt1.cov_params().flat[0:5],
np.r_[0.00790914, 0.00080666, -0.00050719, 0.00022648, 0.00046235],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.07088814, -0.06373107, -0.22770786, 1.12923746, -1.26161339],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.cov_params()[0:5],
np.r_[0.00747782, 0.0092554, 0.04508904, 0.02934488, 0.20312746],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_logit_vb():
y, exog_fe, exog_vc, ident = gen_crossed_logit(10, 10, 1, 2)
glmm1 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = BinomialBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(mean=rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-5.43073978e-01, -2.46197518e+00, -2.36582801e+00,
-9.64030461e-03, 2.32701078e-03],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt1.cov_params().flat[0:5],
np.r_[4.12927123e-02, -2.04448923e-04, 4.64829219e-05, 1.20377543e-04,
-1.45003234e-04],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.70834417, -0.3571011, 0.19126823, -0.36074489, 0.058976],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.cov_params()[0:5],
np.r_[0.05212492, 0.04729656, 0.03916944, 0.25921842, 0.25782576],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_logit_vb_formula():
data = gen_crossed_logit_pandas(10, 10, 1, 2)
fml = "y ~ fe"
fml_vc = {"a": "0 + C(a)", "b": "0 + C(b)"}
glmm1 = BinomialBayesMixedGLM.from_formula(fml, fml_vc, data, vcp_p=0.5)
rslt1 = glmm1.fit_vb()
glmm2 = BinomialBayesMixedGLM(
glmm1.endog, glmm1.exog, glmm1.exog_vc, glmm1.ident, vcp_p=0.5)
rslt2 = glmm2.fit_vb()
assert_allclose(rslt1.params, rslt2.params, atol=1e-4)
rslt1.summary()
rslt2.summary()
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_crossed_poisson_vb():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 0.5)
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt1 = glmm1.fit_map()
glmm2 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident, vcp_p=0.5, fe_p=0.5)
rslt2 = glmm2.fit_vb(mean=rslt1.params)
rslt1.summary()
rslt2.summary()
assert_allclose(
rslt1.params[0:5],
np.r_[-0.54855281, 0.10458834, -0.68777741, -0.01699925, 0.77200546],
rtol=1e-4,
atol=1e-4)
assert_allclose(
rslt2.params[0:5],
np.r_[-0.54691502, 0.22297158, -0.52673802, -0.06218684, 0.74385237],
rtol=1e-4,
atol=1e-4)
for rslt in rslt1, rslt2:
cp = rslt.cov_params()
p = len(rslt.params)
if rslt is rslt1:
assert_equal(cp.shape, np.r_[p, p])
np.linalg.cholesky(cp)
else:
assert_equal(cp.shape, np.r_[p,])
assert_equal(cp > 0, True*np.ones(p))
def test_poisson_formula():
y, exog_fe, exog_vc, ident = gen_crossed_poisson(10, 10, 1, 0.5)
for vb in False, True:
glmm1 = PoissonBayesMixedGLM(
y, exog_fe, exog_vc, ident)
if vb:
rslt1 = glmm1.fit_vb()
else:
rslt1 = glmm1.fit_map()
# Build categorical variables that match exog_vc
df =
|
pd.DataFrame({"y": y, "x1": exog_fe[:, 0]})
|
pandas.DataFrame
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from sklearn.metrics import r2_score
import seaborn as sns
import json
plt.rcParams.update({'font.size': 16})
# %%
fig_dir = r'figures/Classics/'
# %%
class CONSTANTS():
def __init__(self):
self.crab_red = '#f2636e'
self.dense_blue = '#2c2cd5'
self.colors = list(sns.color_palette("Set1", n_colors=7, desat=0.5))
self.markers = ['o', 'x', 's', '^', 'D', 'P', '1', '2', '3',
'4', 'p', '*', 'h', 'H', '+', 'd',
'|', '_']
self.eps = ['oliynyk',
'jarvis',
'mat2vec',
'onehot',
'magpie']
self.mps = ['ael_shear_modulus_vrh',
'energy_atom',
'agl_log10_thermal_expansion_300K',
'agl_thermal_conductivity_300K',
'Egap',
'ael_debye_temperature',
'ael_bulk_modulus_vrh']
self.mp_names = ['Log shear modulus',
'Ab initio energy per atom',
'Log thermal expansion',
'Log thermal conductivity',
'Band gap',
'Debye temperature',
'Bulk modulus']
self.mp_names_dict = dict(zip(self.mps, self.mp_names))
self.mp_units_dict = {'energy_atom': 'eV/atom',
'ael_shear_modulus_vrh': 'GPa',
'ael_bulk_modulus_vrh': 'GPa',
'ael_debye_temperature': 'K',
'Egap': 'eV',
'agl_thermal_conductivity_300K': 'W/m*K',
'agl_log10_thermal_expansion_300K': '1/K'}
self.mp_sym_dict = {'energy_atom': '$E_{atom}$',
'ael_shear_modulus_vrh': '$G$',
'ael_bulk_modulus_vrh': '$B$',
'ael_debye_temperature': '$\\theta_D$',
'Egap': '$E_g$',
'agl_thermal_conductivity_300K': '$\\kappa$',
'agl_log10_thermal_expansion_300K': '$\\alpha$'}
self.classic_models_dict = {'Ridge': 'Ridge',
'SGDRegressor': 'SGD',
'ExtraTreesRegressor': 'ExtraTrees',
'RandomForestRegressor': 'RF',
'AdaBoostRegressor': 'AdaBoost',
'GradientBoostingRegressor': 'GradBoost',
'KNeighborsRegressor': 'kNN',
'SVR': 'SVR',
'lSVR': 'lSVR'}
# %%
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
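# Illustrative usage (added; assumes a PyTorch model, torch is not imported in this file):
#   count_parameters(torch.nn.Linear(10, 5))  # -> 55 trainable params (50 weights + 5 biases)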
# %%
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
return json.JSONEncoder.default(self, obj)
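# %%
# Illustrative check (added; not part of the original script): NumpyEncoder lets
# json serialize numpy arrays and scalars that the stock encoder rejects, e.g.
print(json.dumps({"arr": np.arange(3), "n": np.int64(7)}, cls=NumpyEncoder))
# -> {"arr": [0, 1, 2], "n": 7}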
# %%
def xstr(s):
if s is None:
return ''
else:
return f'seed{str(s)}'
def xstrh(s):
if s is None:
return ''
else:
return xstr(f'{s}-')
# %%
def get_path(score_summary_dir, filename):
path = os.path.join(score_summary_dir, filename)
return path
def load_df(path):
df =
|
pd.read_csv(path)
|
pandas.read_csv
|
import os
import json
import pandas as pd
import numpy as np
from keras.preprocessing.text import Tokenizer
|
pd.set_option('display.max_columns', None)
|
pandas.set_option
|
#!/usr/bin/env python
"""
@author: mmsa12
"""
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter
import string, nltk
from nltk.corpus import stopwords
import urllib.request, json
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('stopwords')
def process_message(message):
words = nltk.word_tokenize(message.lower())
words = [w for w in words if len(w) > 3] # remove small words
# stop words
sw = stopwords.words('english')
words = [word for word in words if not word.isnumeric()]
words = [word for word in words if word not in sw]
# remove punctuations
words = [word for word in words if word not in string.punctuation]
return ' '.join(words)
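# Illustrative example (added): process_message("Stocks rallied, and the Dow climbed 2%!")
# keeps only lower-cased tokens longer than 3 characters that are not numeric,
# stopwords, or punctuation, giving roughly "stocks rallied climbed".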
def get_emotions(self):
# get emotions
em_list = []
em_dict = dict()
em_frequencies = Counter()
lexicon_keys = self.lexicon.keys()
for word in self.words:
if word in lexicon_keys:
em_list.extend(self.lexicon[word])
em_dict.update({word: self.lexicon[word]})
for word in em_list:
em_frequencies[word] += 1
sum_values = sum(em_frequencies.values())
em_percent = {'fear': 0.0, 'anger': 0.0, 'anticipation': 0.0, 'trust': 0.0, 'surprise': 0.0, 'positive': 0.0,
'negative': 0.0, 'sadness': 0.0, 'disgust': 0.0, 'joy': 0.0}
for key in em_frequencies.keys():
em_percent.update({key: float(em_frequencies[key]) / float(sum_values)})
self.em_list = em_list
self.em_dict = em_dict
self.emotion_scores = dict(em_frequencies)
self.em_frequencies = em_percent
class EmoTFIDF:
"""Lexicon source is (C) 2016 National Research Council Canada (NRC) and library is for research purposes only. Source: http://sentiment.nrc.ca/lexicons-for-research/"""
with urllib.request.urlopen("https://raw.githubusercontent.com/mmsa/EmoTFIDF/main/emotions_lex.json") as url:
lexicon = json.loads(url.read().decode())
def set_text(self, text):
self.text = process_message(text)
self.words = list(nltk.word_tokenize(self.text))
self.sentences = list(nltk.sent_tokenize(self.text))
get_emotions(self)
def set_lexicon_path(self,path):
self.path = path
if path!=' ' and path!=0:
with open(path) as jsonfile:
self.lexicon = json.load(jsonfile)
else:
with urllib.request.urlopen("https://raw.githubusercontent.com/mmsa/EmoTFIDF/main/emotions_lex.json") as url:
self.lexicon = json.loads(url.read().decode())
def computeTFIDF(self, docs):
vectorizer = TfidfVectorizer(max_features=200, stop_words=stopwords.words('english'),
token_pattern=r'(?u)\b[A-Za-z]+\b')
vectors = vectorizer.fit_transform(docs)
feature_names = vectorizer.get_feature_names_out()
dense = vectors.todense()
denselist = dense.tolist()
df =
|
pd.DataFrame(denselist, columns=feature_names)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
import os
from time import sleep
import pathlib
import dask
import dask.dataframe as dd
from dask.utils import tmpfile, tmpdir, dependency_depth
from dask.dataframe.utils import assert_eq
def test_to_hdf():
pytest.importorskip("tables")
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]
)
a = dd.from_pandas(df, 2)
with tmpfile("h5") as fn:
a.to_hdf(fn, "/data")
out = pd.read_hdf(fn, "/data")
tm.assert_frame_equal(df, out[:])
with tmpfile("h5") as fn:
a.x.to_hdf(fn, "/data")
out = pd.read_hdf(fn, "/data")
tm.assert_series_equal(df.x, out[:])
a = dd.from_pandas(df, 1)
with tmpfile("h5") as fn:
a.to_hdf(fn, "/data")
out = pd.read_hdf(fn, "/data")
tm.assert_frame_equal(df, out[:])
# test compute = False
with tmpfile("h5") as fn:
r = a.to_hdf(fn, "/data", compute=False)
r.compute()
out = pd.read_hdf(fn, "/data")
tm.assert_frame_equal(df, out[:])
def test_to_hdf_multiple_nodes():
pytest.importorskip("tables")
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]
)
a = dd.from_pandas(df, 2)
df16 = pd.DataFrame(
{
"x": [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
],
"y": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
},
index=[
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
16.0,
],
)
b = dd.from_pandas(df16, 16)
# saving to multiple nodes
with tmpfile("h5") as fn:
a.to_hdf(fn, "/data*")
out = dd.read_hdf(fn, "/data*")
assert_eq(df, out)
# saving to multiple nodes making sure order is kept
with tmpfile("h5") as fn:
b.to_hdf(fn, "/data*")
out = dd.read_hdf(fn, "/data*")
assert_eq(df16, out)
# saving to multiple datasets with custom name_function
with tmpfile("h5") as fn:
a.to_hdf(fn, "/data_*", name_function=lambda i: "a" * (i + 1))
out = dd.read_hdf(fn, "/data_*")
assert_eq(df, out)
out = pd.read_hdf(fn, "/data_a")
tm.assert_frame_equal(out, df.iloc[:2])
out = pd.read_hdf(fn, "/data_aa")
tm.assert_frame_equal(out, df.iloc[2:])
# test multiple nodes with hdf object
with tmpfile("h5") as fn:
with
|
pd.HDFStore(fn)
|
pandas.HDFStore
|
import pandas as pd
import pickle
import gzip
from calculate_errors import get_file
from tqdm import trange
import numpy as np
from sample import get_id, search
def get_csv(i: int, path: str):
return get_file(i, path + '/temp').replace('data', 'errors').replace('pickle.gz', 'csv')
def main():
PATH = 'active_learning'
UNPREPARED_PATH = 'unprepared_volume_data'
# Start by loading the errors file
errors =
|
pd.DataFrame(columns=['batch_ids', 'errors'])
|
pandas.DataFrame
|
from datetime import datetime
from pandas.api.types import is_datetime64_any_dtype
from pandas.api.types import is_period_dtype
from pandas.core.common import flatten
from functools import wraps
from copy import deepcopy
import logging
import numpy as np
import pandas as pd
import re
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Union,
)
logger = logging.getLogger(__name__)
# across {{{1
def across(df: pd.DataFrame,
columns: Union[str, Tuple[str], List[str]] = None,
function: Callable = None,
series_obj: bool = False,
*args, **kwargs) -> pd.DataFrame:
'''Apply function across multiple columns
Across allows you to apply a function across a number of columns in one
statement. Functions can be applied to series values (via apply()) or access
pd.Series object methods.
In pandas, to apply the same function (on a Series/columns' values) you
would normally do something like this:
.. code-block::
df['column'].apply(function)
df['column2'].apply(function)
df['column3'].apply(function)
Piper equivalent would be:
.. code-block::
across(df, ['column1', 'column2', 'column3'], function)
You can also work with Series object functions by passing keyword
series_obj=True. In Pandas, if you wanted to change the dtype of a column
you would use something like:
.. code-block::
df['col'] = df['col'].astype(float)
df['col2'] = df['col2'].astype(float)
df['col3'] = df['col3'].astype(float)
The equivalent with across would be:
.. code-block::
df = across(df, ['col', 'col2', 'col3'], function=lambda x: x.astype(float))
Parameters
----------
df
pandas dataframe
columns
column(s) to apply function.
- If a list is provided, only the columns listed are affected by the
function.
- If a tuple is supplied, the first and second values will
correspond to the from and to column(s) range used to apply the function to.
function
function to be called.
series_obj
Default is False.
True - Function applied at Series or (DataFrame) 'object' level.
False - Function applied to each Series row values.
Returns
-------
A pandas dataframe
Examples
--------
The example below applies a function to each of the listed columns' row values.
.. code-block:: python
%%piper
sample_data()
>> across(['dates', 'order_dates'], to_julian)
# Alternative syntax, passing a lambda...
>> across(['order_dates', 'dates'], function=lambda x: to_julian(x), series_obj=False)
>> head(tablefmt='plain')
dates order_dates countries regions ids values_1 values_2
0 120001 120007 Italy East A 311 26
1 120002 120008 Portugal South D 150 375
2 120003 120009 Spain East A 396 88
3 120004 120010 Italy East B 319 233
.. code-block:: python
%%piper
sample_data()
>> across(['dates', 'order_dates'], fiscal_year, year_only=True)
>> head(tablefmt='plain')
dates order_dates countries regions ids values_1 values_2
0 FY 19/20 FY 19/20 Italy East A 311 26
1 FY 19/20 FY 19/20 Portugal South D 150 375
2 FY 19/20 FY 19/20 Spain East A 396 88
3 FY 19/20 FY 19/20 Italy East B 319 233
Accessing Series object methods - by passing series_obj=True you can also
manipulate series object and string vectorized functions (e.g. pd.Series.str.replace())
.. code-block:: python
%%piper
sample_data()
>> select(['-ids', '-regions'])
>> across(columns='values_1', function=lambda x: x.astype(int), series_obj=True)
>> across(columns=['values_1'], function=lambda x: x.astype(int), series_obj=True)
>> head(tablefmt='plain')
dates order_dates countries values_1 values_2
0 2020-01-01 00:00:00 2020-01-07 00:00:00 Italy 311 26
1 2020-01-02 00:00:00 2020-01-08 00:00:00 Portugal 150 375
2 2020-01-03 00:00:00 2020-01-09 00:00:00 Spain 396 88
3 2020-01-04 00:00:00 2020-01-10 00:00:00 Italy 319 233
'''
if isinstance(df, pd.Series):
raise TypeError('Please specify DataFrame object')
if function is None:
raise ValueError('Please specify function to apply')
if isinstance(columns, str):
if columns not in df.columns:
raise ValueError(f'column {columns} not found')
if isinstance(columns, tuple):
columns = df.loc[:, slice(*columns)].columns.tolist()
if isinstance(columns, list):
for col in columns:
if col not in df.columns:
raise ValueError(f'column {col} not found')
if isinstance(columns, str):
# If not series function (to be applied to series values)
if not series_obj:
df[columns] = df[columns].apply(function, *args, **kwargs)
else:
df[[columns]] = df[[columns]].apply(function, *args, **kwargs)
try:
# No columns -> Apply with context of ALL dataframe columns
if columns is None:
df = df.apply(function, *args, **kwargs)
return df
# Specified group of columns to update.
if isinstance(columns, list):
# Apply function to each columns 'values'
if not series_obj:
for col in columns:
df[col] = df[col].apply(function, *args, **kwargs)
# No, user wants to use/access pandas Series object attributes
# e.g. str, astype etc.
else:
df[columns] = df[columns].apply(function, *args, **kwargs)
except ValueError as e:
logger.info(e)
msg = 'Are you trying to apply a function working with Series values(s)? Try series_obj=False'
raise ValueError(msg)
except AttributeError as e:
logger.info(e)
msg = 'Are you trying to apply a function using Series object(s)? Try series_obj=True'
raise AttributeError(msg)
return df
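# --- Illustrative usage (added; the toy DataFrame is hypothetical, not from piper) ---
# A plain-pandas sketch of across() without the %%piper magic:
#
#   demo = pd.DataFrame({'a': ['1', '2'], 'b': ['3', '4'], 'c': ['x', 'y']})
#   demo = across(demo, ['a', 'b'], function=lambda v: int(v) * 10)      # per-value apply
#   demo = across(demo, ['a', 'b'], function=lambda s: s.astype(float),  # Series-level method
#                 series_obj=True)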
# adorn() {{{1
def adorn(df: pd.DataFrame,
columns: Union[str, list] = None,
fillna: Union[str, int] = '',
col_row_name: str = 'All',
axis: Union[int, str] = 0,
ignore_index: bool = False) -> pd.DataFrame:
'''add totals to a dataframe
Based on R janitor package function add row and/or column totals to a
dataframe.
Examples
--------
.. code-block::
df = sample_matrix(seed=42)
df = adorn(df, ['a', 'c'], axis='row')
head(df, 10, tablefmt='plain')
a b c d e
0 15 8.617 16 25.23 7.658
1 8 25.792 18 5.305 15.426
2 5 5.343 12 -9.133 -7.249
3 4 -0.128 13 0.92 -4.123
4 25 7.742 11 -4.247 4.556
All 57 70
.. code-block::
url = 'https://github.com/datagy/pivot_table_pandas/raw/master/sample_pivot.xlsx'
df = pd.read_excel(url, parse_dates=['Date'])
head(df)
Date Region Type Units Sales
2020-07-11 East Children's Clothing 18.0 306
2020-09-23 North Children's Clothing 14.0 448
g1 = df.groupby(['Type', 'Region']).agg(TotalSales=('Sales', 'sum')).unstack()
g1 = adorn(g1, axis='both').astype(int)
g1 = flatten_names(g1, remove_prefix='TotalSales')
g1
East North South West All
Children's Clothing 45849 37306 18570 20182 121907
Men's Clothing 51685 39975 18542 19077 129279
Women's Clothing 70229 61419 22203 22217 176068
All 167763 138700 59315 61476 427254
Parameters
----------
df
Pandas dataframe
columns
columns to be considered on the totals row. Default None - All columns
considered.
fillna
fill NaN values (default is '')
col_row_name
name of row/column title (default 'All')
axis
axis to apply total (values: 0 or 'row', 1 or 'column')
To apply totals to both axes - use 'both'. (default is 0)
ignore_index
default False. When concatenating totals, ignore index in both
dataframes.
Returns
-------
A pandas DataFrame with additional totals row and/or column total.
'''
# ROW:
if axis == 0 or axis == 'row' or axis == 'both':
if columns is None:
numeric_columns = df.select_dtypes(include='number').columns
else:
if isinstance(columns, str):
numeric_columns = [columns]
else:
numeric_columns = columns
totals = {col: df[col].sum() for col in numeric_columns}
index_length = len(df.index.names)
if index_length == 1:
row_total = pd.DataFrame(totals, index=[col_row_name])
df =
|
pd.concat([df, row_total], axis=0, ignore_index=ignore_index)
|
pandas.concat
|
import csv
import logging
from datetime import datetime
from pathlib import Path
import extract_data as ex
import pandas as pd
logger = logging.getLogger(__name__)
def read_dat_as_DataFrame(input_filepath):
logger.info(f"reading {input_filepath}")
converted_count = 0
start_ts = datetime.now()
records = []
with input_filepath.open("r") as fin:
for line in fin:
if not line.startswith("A"):
continue
try:
epicenter = ex.extract_epicenter(line)
except ex.ExtractError as e:
logger.warning("skipped due to ExtractError: %s", line)
continue
except Exception as e:
logger.error("error line: %s", line)
raise e
records.append(epicenter)
converted_count += 1
df = pd.DataFrame.from_records(records)
end_ts = datetime.now()
elapsed_time = end_ts - start_ts
logger.info(
f"finish reading from {input_filepath}, shape: {df.shape}, time {elapsed_time}"
)
return df
def convert_dat_to_tsv(input_filepath, output_filepath):
logger.info(f"convert {input_filepath} into {output_filepath}")
converted_count = 0
start_ts = datetime.now()
with input_filepath.open("r") as fin, output_filepath.open("w") as fout:
writer = csv.DictWriter(fout, fieldnames=ex.FIELDNAMES, delimiter="\t")
writer.writeheader()
for line in fin:
if not line.startswith("A"):
continue
try:
epicenter = ex.extract_epicenter(line)
except ex.ExtractError as e:
logger.warning("skipped due to ExtractError: %s", line)
continue
except Exception as e:
logger.error("error line: %s", line)
raise e
writer.writerow(epicenter)
converted_count += 1
end_ts = datetime.now()
elapsed_time = end_ts - start_ts
logger.info(
f"{output_filepath} has {converted_count} records, time {elapsed_time}"
)
def main():
input_dir = Path("../data/utf-8")
df_list = [
read_dat_as_DataFrame(path) for path in input_dir.glob("i*.dat")
]
df_all =
|
pd.concat(df_list)
|
pandas.concat
|
# <NAME> (<EMAIL>)
from __future__ import absolute_import, division, print_function
from builtins import range
import numpy as np
import pandas as pd
import scipy.stats as ss
from joblib import Memory
from mlpaper.constants import METHOD, METRIC
from mlpaper.mlpaper import PAIRWISE_DEFAULT, loss_summary_table
MOMENT = "moment" # Don't put in constants since only needed for regression
def shape_and_validate(y, mu, std):
"""Validate shapes and types of predictive distribution against data and
return the shape information.
Parameters
----------
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
mu : ndarray, shape (n_samples,)
Predictive mean for each regression data point. Typically of type
`float`. Must be of same shape as `y`.
std : ndarray, shape (n_samples,)
Predictive standard deviation for each regression data point. Typically
of type `float`. Must be positive and of same shape as `y`.
Returns
-------
n_samples : int
Number of data points (length of `y`)
"""
n_samples, = y.shape
assert n_samples >= 1
assert np.all(np.isfinite(y))
assert mu.shape == (n_samples,) and std.shape == (n_samples,)
assert np.all(np.isfinite(mu)) and np.all(np.isfinite(std))
assert np.all(std > 0.0)
return n_samples
# ============================================================================
# Loss functions
# ============================================================================
def square_loss(y, mu, std):
"""Compute MSE of predictions vs true targets.
Parameters
----------
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
mu : ndarray, shape (n_samples,)
Predictive mean for each regression data point. Typically of type
`float`. Must be of same shape as `y`.
std : ndarray, shape (n_samples,)
Predictive standard deviation for each regression data point. Typically
of type `float`. Must be positive and of same shape as `y`. Ignored in
this function.
Returns
-------
loss : ndarray, shape (n_samples,)
Square error of target vs prediction. Same shape as `y`.
"""
shape_and_validate(y, mu, std)
loss = (y - mu) ** 2
return loss
def abs_loss(y, mu, std):
"""Compute MAE of predictions vs true targets.
Parameters
----------
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
mu : ndarray, shape (n_samples,)
Predictive mean for each regression data point. Typically of type
`float`. Must be of same shape as `y`.
std : ndarray, shape (n_samples,)
Predictive standard deviation for each regression data point. Typically
of type `float`. Must be positive and of same shape as `y`. Ignored in
this function.
Returns
-------
loss : ndarray, shape (n_samples,)
Absolute error of target vs prediction. Same shape as `y`.
"""
shape_and_validate(y, mu, std)
loss = np.abs(y - mu)
return loss
def log_loss(y, mu, std):
"""Compute log loss of Gaussian predictive distribution on target `y`.
Parameters
----------
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
mu : ndarray, shape (n_samples,)
Predictive mean for each regression data point. Typically of type
`float`. Must be of same shape as `y`.
std : ndarray, shape (n_samples,)
Predictive standard deviation for each regression data point. Typically
of type `float`. Must be positive and of same shape as `y`.
Returns
-------
loss : ndarray, shape (n_samples,)
Log loss of Gaussian predictive distribution on target `y`. Same shape
as `y`.
"""
shape_and_validate(y, mu, std)
loss = -ss.norm.logpdf(y, loc=mu, scale=std)
return loss
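# Worked example (added for illustration): for a single point with y = 1.0,
# mu = 0.0, std = 1.0, the three losses evaluate to
#   square_loss -> (1 - 0)**2 = 1.0
#   abs_loss    -> |1 - 0| = 1.0
#   log_loss    -> 0.5*log(2*pi) + 0.5 ~= 1.4189   (negative Gaussian log-pdf)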
# ============================================================================
# Use and summarize loss functions
# ============================================================================
def loss_table(pred_tbl, y, metrics_dict):
"""Compute loss table from table of Gaussian predictions.
Parameters
----------
pred_tbl : DataFrame, shape (n_samples, n_methods * 2)
DataFrame with predictive distributions. Each row is a data point.
The columns should be a hierarchical index that is the cartesian product
of methods x moments. For example, ``pred_tbl.loc[5, 'foo']``
is a pandas series with (mean, std deviation) prediction that method
foo places on ``y[5]``. Cannot be empty.
y : ndarray, shape (n_samples,)
True targets for each regression data point. Typically of type `float`.
metrics_dict : dict of str to callable
Dictionary mapping loss function name to function that computes loss,
e.g., `log_loss`, `square_loss`, ...
Returns
-------
loss_tbl : DataFrame, shape (n_samples, n_metrics * n_methods)
DataFrame with loss of each method according to each loss function on
each data point. The rows are the data points in `y` (that is the index
matches `pred_tbl`). The columns are a hierarchical index that is the
cartesian product of loss x method. That is, the loss of method foo's
prediction of ``y[5]`` according to loss function bar is stored in
``loss_tbl.loc[5, ('bar', 'foo')]``.
"""
methods, moments = pred_tbl.columns.levels
assert "mu" in moments and "std" in moments
n_samples = len(pred_tbl)
assert y.shape == (n_samples,)
assert n_samples >= 1 and len(methods) >= 1
col_names = pd.MultiIndex.from_product([metrics_dict.keys(), methods], names=[METRIC, METHOD])
loss_tbl =
|
pd.DataFrame(index=pred_tbl.index, columns=col_names, dtype=float)
|
pandas.DataFrame
|
import os
import pandas as pd
import numpy as np
import re
import logging
DATA_PATH = os.getenv('DATA_PATH')
if DATA_PATH is None:
raise ValueError("DATA_PATH needs to be set")
def changeTrade(eba, rightba, wrongba, start=None, end=None, tol=1):
logger = logging.getLogger("clean")
ind = [True]*len(eba.df.index)
if start is not None:
ind &= eba.df.index > start
if end is not None:
ind &= eba.df.index < end
ind_diff = ((
(eba.df.loc[:, eba.KEY["ID"] % (rightba, wrongba)] + eba.df.loc[
:, eba.KEY["ID"] % (wrongba, rightba)]).abs() > tol)
| eba.df.loc[:, eba.KEY["ID"] % (wrongba, rightba)].isna())
ind_diff &= ind
eba.df.loc[ind_diff, eba.KEY["ID"] % (wrongba, rightba)] = (
-eba.df.loc[ind_diff, eba.KEY["ID"] % (rightba, wrongba)])
nchange = sum(ind_diff)
if nchange > 0:
logger.debug("Picking %s over %s for %d pts" % (
rightba, wrongba, nchange))
return eba
def fillNAs(eba, col, pad_limit=2, limit=3):
logger = logging.getLogger("clean")
ind_na = eba.df.loc[:, col].isna()
nchange = ind_na.sum()
if nchange > 0:
logger.debug("%s: %d NA values to deal with" % (
col, nchange))
# first try pad for 2 hours
eba.df.loc[:, col] = eba.df.loc[:, col].fillna(
method='pad', limit=pad_limit)
ind_na = eba.df.loc[:, col].isna()
nchange = ind_na.sum()
if nchange > 0:
logger.debug("%s: replacing %d NA values with next/prev week" % (
col, nchange))
if nchange > 50:
logger.warning("%s: replacing %d NA values with next/prev week" % (
col, nchange))
for ts in eba.df.index[ind_na]:
try:
eba.df.loc[ts, col] = eba.df.loc[
ts-pd.Timedelta("%dH" % (7*24)), col]
except KeyError:
eba.df.loc[ts, col] = eba.df.loc[
ts+pd.Timedelta("%dH" % (7*24)), col]
# If we didn't manage to get the right value, look forward
cnt = 0
while np.isnan(eba.df.loc[ts, col]):
cnt += 1
if cnt > limit:
logger.error("Tried to look %d times ahead for %s" %
(limit, str(ts)))
raise ValueError("Can't fill this NaN")
eba.df.loc[ts, col] = eba.df.loc[
ts+pd.Timedelta("%dH" % (cnt*7*24)), col]
return eba
def removeOutliers(eba, col, start=None, end=None, thresh_u=None,
thresh_l=None, remove=True, limit=4):
logger = logging.getLogger("clean")
if start is None:
start = pd.to_datetime("2016-01-01")
if end is None:
end = pd.to_datetime("2017-01-02")
if (thresh_u is None) and (thresh_l is None):
mu = eba.df.loc[start:end, col].mean()
sigma = eba.df.loc[start:end, col].std()
ind_out = np.abs(eba.df.loc[:, col]-mu) > (3*sigma)
else:
if thresh_l is None:
thresh_l = -np.inf
if thresh_u is None:
thresh_u = +np.inf
ind_out = (eba.df.loc[:, col] < thresh_l)
ind_out |= (eba.df.loc[:, col] > thresh_u)
ind_out &= (eba.df.index > start) & (eba.df.index < end)
nchange = sum(ind_out)
logger.debug("%s: %d outliers out of [%.2g, %.2g]" % (
col, nchange, thresh_l, thresh_u))
if nchange > 10:
logger.warning("%s: %d outliers out of [%.2g, %.2g]" % (
col, nchange, thresh_l, thresh_u))
if remove:
eba.df.loc[ind_out, col] = np.nan
return eba
def applyFixes3(eba, log_level=logging.INFO):
logger = logging.getLogger("clean")
log_level_old = logger.level
logger.setLevel(log_level)
# special changes
logger.debug("\tSpecial changes")
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-02-12"),
end=pd.to_datetime("2016-02-14"))
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-08-01"),
end=pd.to_datetime("2016-08-15"))
eba = removeOutliers(eba, "EBA.NSB-FPL.ID.H", thresh_u=-5.,
start=pd.to_datetime("2016-08-01"),
end=pd.to_datetime("2016-08-15"))
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-10-07"),
end=pd.to_datetime("2016-10-08 03:00"))
eba = removeOutliers(eba, "EBA.NSB-FPL.ID.H", thresh_u=-5.,
start=pd.to_datetime("2016-10-07"),
end=pd.to_datetime("2016-10-08 03:00"))
for ba, ba2 in [("IID", "CISO"), ("PJM", "CPLW"), ("PJM", "DUK"),
("PJM", "TVA"),
("FPL", "SOCO"), ("SC", "SOCO"), ("SEPA", "SOCO"),
("CPLW", "TVA"), ("DUK", "TVA"),
("FMPP", "FPL"), ("FPC", "FPL"), ("JEA", "FPL"),
("SEC", "FPL"),
("CPLW", "DUK"), ("YAD", "DUK"), ("SEPA", "DUK"),
("DOPD", "BPAT"), ("LDWP", "BPAT"),
("FMPP", "FPC"), ("SEC", "FPC"),
("LDWP", "PACE"),
("LDWP", "NEVP"),
("SEPA", "SC"),
("FMPP", "TEC"),
("SEC", "JEA"),
("NSB", "FPC"), ("NSB", "FPL")]:
eba = fillNAs(eba, eba.KEY["ID"] % (ba, ba2))
eba = changeTrade(eba, ba, ba2, tol=0.)
for field in ["D", "NG"]:
eba = removeOutliers(eba, eba.get_cols(
r="FPC", field=field)[0], thresh_l=200.)
eba = removeOutliers(eba, eba.get_cols(
r="TVA", field=field)[0], thresh_l=3000.)
eba = removeOutliers(eba, eba.get_cols(r="PSCO", field=field)[
0], thresh_l=2000., thresh_u=10000.)
eba = removeOutliers(eba, eba.get_cols(
r="PACE", field=field)[0], thresh_u=10000.)
eba = removeOutliers(
eba, eba.get_cols(r="SRP", field=field)[0], thresh_l=1000.,
thresh_u=5000., start=pd.to_datetime("2016-12-01"),
end=pd.to_datetime("2016-12-31"))
eba = removeOutliers(
eba, eba.get_cols(r="SRP", field=field)[0], thresh_u=4900.,
start=pd.to_datetime("2016-01-01"),
end=pd.to_datetime("2016-05-01"))
eba = removeOutliers(eba, eba.get_cols(
r="LDWP", field=field)[0], thresh_l=100.)
eba = removeOutliers(
eba, eba.get_cols(r="IPCO", field=field)[0], thresh_l=800.,
start=pd.to_datetime("2016-08-01"),
end=
|
pd.to_datetime("2016-08-05")
|
pandas.to_datetime
|
#!/usr/bin/python2.7
from __future__ import division
import os
import urllib, cStringIO
import pymongo as pm
import numpy as np
import scipy.stats as stats
import pandas as pd
import json
import re
from PIL import Image
import base64
import sys
'''
To generate main dataframe from pymongo database, run, e.g.:
exp1 = ['run3_size4_waiting', 'run4_generalization']
exp2 = ['run5_submitButton']
python generate_refgame_dataframe.py --iterationName run3_size4_waiting
python generate_refgame_dataframe.py --iterationName run4_generalization
python generate_refgame_dataframe.py --iterationName run5_submitButton
'''
# directory & file hierarchy
proj_dir = os.path.abspath('../../..')
analysis_dir = os.getcwd()
results_dir = os.path.join(proj_dir,'results')
plot_dir = os.path.join(results_dir,'plots')
csv_dir = os.path.join(results_dir,'csv')
exp_dir = os.path.abspath(os.path.join(proj_dir,'experiments'))
sketch_dir = os.path.abspath(os.path.join(proj_dir,'sketches'))
# set vars
auth = pd.read_csv('auth.txt', header = None) # this auth.txt file contains the password for the sketchloop user
pswd = auth.values[0][0]
user = 'sketchloop'
host = 'stanford-cogsci.org' ## cocolab ip address
# have to fix this to be able to analyze from local
import pymongo as pm
conn = pm.MongoClient('mongodb://sketchloop:' + pswd + '@127.0.0.1')
db = conn['3dObjects']
coll = db['graphical_conventions']
# list of researcher mturk worker ID's to ignore
jefan = ['A1MMCS8S8CTWKU','A1MMCS8S8CTWKV','A1MMCS8S8CTWKS']
hawkrobe = ['A1BOIDKD33QSDK']
megsano = ['A1DVQQLVZR7W6I']
researchers = jefan + hawkrobe + megsano
# Assign variables within imported analysis helpers
import df_generation_helpers as h
if sys.version_info[0]>=3:
from importlib import reload
## add helpers to python path
if os.path.join(proj_dir,'analysis','python') not in sys.path:
sys.path.append(os.path.join(proj_dir,'analysis','python'))
if not os.path.exists(results_dir):
os.makedirs(results_dir)
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
if not os.path.exists(csv_dir):
os.makedirs(csv_dir)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--iterationName', type=str, \
help='options: run3_size4_waiting, run4_generalization, run5_submitButton',
default='run5_submitButton')
args = parser.parse_args()
iterationName = args.iterationName
## get total number of stroke and clickedObj events in the collection as a whole
S = coll.find({ '$and': [{'iterationName':iterationName}, {'eventType': 'stroke'}]}).sort('time')
C = coll.find({ '$and': [{'iterationName':iterationName}, {'eventType': 'clickedObj'}]}).sort('time')
## get list of all candidate games
all_games = coll.find({'iterationName':iterationName}).distinct('gameid')
## get list of complete and valid games
complete_games = h.get_complete_and_valid_games(all_games, coll, iterationName, researchers=researchers, tolerate_undefined_worker=False)
## generate actual dataframe and get only valid games (filtering out games with low accuracy, timeouts)
D = h.generate_dataframe(coll, complete_games, iterationName, csv_dir)
# ## filter crazies and add column
D = h.find_crazies(D)
## add features for recognition experiment
D = h.add_recog_session_ids(D)
D = h.add_distractors_and_shapenet_ids(D)
## if generalization column still capitalized, fix it
try:
D = D.rename(index=str, columns={"Generalization": "generalization"})
except:
pass
## filter out single low accuracy game
D = D[D['low_acc'] != True]
## filter out games with missing data
missing_data_games = D[D['drawDuration'].isna()]['gameID'].values
D = D[~D['gameID'].isin(missing_data_games)]
## assign extra columns to keep track of category/subset/condition combinations
if iterationName=='run5_submitButton':
D = D.assign(category_subset = pd.Series(D['category'] + D['subset']))
D = D.assign(category_subset_condition =
|
pd.Series(D['category'] + D['subset'] + D['condition'])
|
pandas.Series
|
import matplotlib.pyplot as plt
import requests
from bs4 import BeautifulSoup
import pandas as pd
import six
import numpy as np
def render_mpl_table(data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',
bbox=[0, 0, 1, 1], header_columns=0,
ax=None, **kwargs):
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for k, cell in six.iteritems(mpl_table._cells):
cell.set_edgecolor(edge_color)
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
return ax
input_data_path = 'myTickerList.txt'
Tickers = pd.read_csv(input_data_path, header=None, names=['tk'])
#Tickers = ['AAPL', 'MSFT']
premarket = pd.DataFrame(columns=['Ticker','Change (pre)'])
aftermarket =
|
pd.DataFrame(columns=['Ticker','Change (after)'])
|
pandas.DataFrame
|
# In this program we take CSVs that are prepared with a search name - either a Fund name / ISIN / Stock ticker
# and use that to search either Investing.com (InvestPy) or Yahoo Finance (with pandas URL) to get historical
# price data. Then we plot graphs using matplotlib, and present these in PDF using ReportLab.
import investpy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import MaxNLocator, LinearLocator
from urllib.error import HTTPError
from time import sleep
import os
import textwrap
import pickle # Use pickle module to save complex Python objects to the disk. If all objects can be handled by json,
# you can use json module, which gives a human-readable file. However in this case, we have dataframes.
# You can read dfs into json, but they must be assigned as json first. Simpler to use pickle here.
# ReportLab imports
from reportlab.platypus import SimpleDocTemplate, PageTemplate, Frame, Flowable, Paragraph, Table, TableStyle, Spacer, KeepTogether # Platypus contains the flowable classes
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle # StyleSheet is a set of default style we can use, and ParagraphStyle can customise them
from reportlab.lib.enums import TA_JUSTIFY, TA_CENTER # Import text alignment & justify constants here
from reportlab.lib import colors
from reportlab.pdfbase import pdfmetrics # Used to register fonts
from reportlab.pdfbase.ttfonts import TTFont # Used for creating TrueType font object
from io import BytesIO # IO streams are file-like objects that live in python memory, rather than on disk. Much faster, and less files!
from svglib.svglib import svg2rlg # Library for converting SVG image files into other formats (e.g. ReportLab graphics.)
# Read csv file of trades for a tax year. CSVs must be prepared with a search name col that works on Investing.com or Yahoo.
equities_DF = pd.read_csv('./Investments CSV Example.csv', sep=',', header=0, index_col=0).dropna(how='all')
# Convert date strings to datetime objects.
equities_DF['Bought'] = pd.to_datetime(equities_DF['Bought'], format='%d/%m/%Y')
equities_DF['Sold'] = pd.to_datetime(equities_DF['Sold'], format='%d/%m/%Y')
# Calculate time buffer so we get a bit of extra price data before and after trade.
equities_DF['Buffer'] = (equities_DF['Sold'] - equities_DF['Bought']) * 0.2
# Create column for time interval between Bought and Sold
equities_DF['Interval'] = equities_DF['Sold'] - equities_DF['Bought']
# Create search-date columns for 'bought' and 'sold' with added buffer
equities_DF['Bought_search'] = equities_DF['Bought'] - equities_DF['Buffer']
equities_DF['Sold_search'] = equities_DF['Sold'] + equities_DF['Buffer']
# Create two Epoch timestamp (ts) columns for bought and sold dates.
equities_DF['Bought_ts'] = ((equities_DF['Bought_search'] - pd.Timestamp('1970-01-01')) // pd.Timedelta('1s')).astype('Int64') # Int64 is a special pandas type that supports nullable Ints.
equities_DF['Sold_ts'] = ((equities_DF['Sold_search'] - pd.Timestamp('1970-01-01')) // pd.Timedelta('1s')).astype('Int64')
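# Worked example (illustrative): pd.Timestamp('2020-01-01') - pd.Timestamp('1970-01-01')
# is 18262 days, so floor-division by pd.Timedelta('1s') gives 1577836800 epoch seconds.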
# Create master dictionary for holding name of equity and df of historical prices
prices = {}
# Function for fetching historical price data
def fetch_prices():
consecutive_trades = '' # Variable for checking consecutive trades (i.e. where two different purchases were sold together)
consecutive_trades_I = '' # Another variable for checking consecutive trades (but in second part of code)
for equity in enumerate(equities_DF.index):
# Add fund/share as dict key, and add sub-dict with key as 'Investpy fund name' / 'ISIN' / 'Ticker' and search name as value.
if equity[1] not in prices:
prices[equity[1]] = equities_DF.iloc[equity[0], 7:10].dropna().to_dict()
consecutive_trades = equity[1]
elif equity[1] == consecutive_trades: # If a consecutive buy exists, add the date of that buy.
prices[equity[1]]['Additional buy'] = equities_DF.iloc[equity[0], 1]
consecutive_trades = equity[1]
# Set default search country as UK, unless 'USD' found.
country = 'United Kingdom'
if 'USD' in equity[1]:
country = 'United States'
elif 'CAD' in equity[1]:
country = 'Canada'
# Retrieve historic fund/share prices
# First check what type of search we need to do: using Fund Name, ISIN, or ticker
if equity[1] == consecutive_trades_I: # Skip the additional buys if they are part of same sell transaction.
print(f'{equity[0]}. Additional buy for {equity[1]} - skipped.')
continue
elif 'InvestPy Fund Name' in prices[equity[1]]:
search_name = prices[equity[1]]['InvestPy Fund Name'] # Get value that we use to search InvestPy or Yahoo.
try: # Add a df of historical price data to 'Price History' key
prices[equity[1]]['Price History'] = investpy.get_fund_historical_data(fund=search_name,
country=country, # Below converts datetime to string format for searching
from_date=equities_DF.iloc[equity[0], -4].strftime('%d/%m/%Y'),
to_date=equities_DF.iloc[equity[0], -3].strftime('%d/%m/%Y'),
interval='Daily')
print(f'{equity[0]}. Retrieved fund price data for {equity[1]}.')
except RuntimeError:
print(RuntimeError, f'CHECK! InvestPy did not find price data for {equity[1]}')
elif 'Stock Ticker' in prices[equity[1]]:
search_name = prices[equity[1]]['Stock Ticker']
try:
prices[equity[1]]['Price History'] = investpy.get_stock_historical_data(stock=search_name,
country=country,
from_date=equities_DF.iloc[equity[0], -4].strftime('%d/%m/%Y'),
to_date=equities_DF.iloc[equity[0], -3].strftime('%d/%m/%Y'),
interval='Daily')
print(f'{equity[0]}. Retrieved stock price data for {equity[1]}.')
except RuntimeError: # If InvestPy fails, try Yahoo Finance.
prices[equity[1]]['Price History'] = pd.read_csv(f'https://query1.finance.yahoo.com/v7/finance/download/{search_name}?period1={equities_DF.iloc[equity[0], -2]}&period2={equities_DF.iloc[equity[0], -1]}&interval=1d&events=history', index_col='Date')
# Yahoo Finance data not downloaded as datetime objects - convert these:
prices[equity[1]]['Price History'].index = pd.to_datetime(prices[equity[1]]['Price History'].index, format='%Y-%m-%d')
print(f'{equity[0]}. Retrieved stock price data for {equity[1]} from YF.')
sleep(1) # Ensures we don't overload Yahoo with requests.
except HTTPError:
print('CHECK! Yahoo Finance request failed for', equity[1])
elif 'ISIN for Yahoo Finance' in prices[equity[1]]:
search_name = prices[equity[1]]['ISIN for Yahoo Finance']
try:
prices[equity[1]]['Price History'] = pd.read_csv(f'https://query1.finance.yahoo.com/v7/finance/download/{search_name}?period1={equities_DF.iloc[equity[0], -2]}&period2={equities_DF.iloc[equity[0], -1]}&interval=1d&events=history', index_col='Date')
prices[equity[1]]['Price History'].index = pd.to_datetime(prices[equity[1]]['Price History'].index, format='%Y-%m-%d') # Convert index to datetime
print(f'{equity[0]}. Retrieved fund price data for {equity[1]} using ISIN.')
sleep(1)
except HTTPError:
try: # Some ISIN numbers require a '.L' on the end to be found on Yahoo for some reason.
prices[equity[1]]['Price History'] = pd.read_csv(f'https://query1.finance.yahoo.com/v7/finance/download/{search_name}.L?period1={equities_DF.iloc[equity[0], -2]}&period2={equities_DF.iloc[equity[0], -1]}&interval=1d&events=history', index_col='Date')
prices[equity[1]]['Price History'].index = pd.to_datetime(prices[equity[1]]['Price History'].index, format='%Y-%m-%d') # Convert index to datetime
print(f'{equity[0]}. Retrieved fund price data for {equity[1]} using ISIN.')
sleep(1)
except HTTPError:
print('CHECK! Yahoo Finance request failed for', equity[1])
except Exception as UnknownError:
print('Unknown error for', equity[1], UnknownError)
else: # I couldn't find this equity on Investing.com or Yahoo Finance so we just skip it.
print(f'{equity[0]}. No price data for this equity - skipped.')
consecutive_trades_I = equity[1] # Overwrite this var to check for consecutives.
# Now correct price data which is in £ not pennies: Some funds randomly change from £s to pennies midway through dataset.
try: # Correct values which are < max value divided by 100.
prices[equity[1]]['Price History'].loc[prices[equity[1]]['Price History']['Close'] < prices[equity[1]]['Price History']['Close'].max() / 100, ['Open', 'High', 'Low', 'Close']] *= 100
except KeyError:
print(KeyError, 'This equity had no price data')
# Fetch the prices if not found already:
if not os.path.isfile('./prices_2019-20.pkl'):
fetch_prices()
# Save prices dictionary to disk, so I don't have to retrieve price data everytime.
# Highest protocol ensures the correct compatibility with my Python version. This is a binary encoding, hence 'wb'.
def save_prices(prices_dict, filename):
with open(filename, 'wb') as filepath:
pickle.dump(prices_dict, filepath, pickle.HIGHEST_PROTOCOL)
# Save the prices to file (can # out so it doesn't run everytime):
if not os.path.isfile('./prices_2019-20.pkl'):
save_prices(prices, 'prices_2019-20.pkl')
# Read pickle file into Python again.
def load_prices(filename):
with open(filename, 'rb') as file:
prices = pickle.load(file)
return prices
# Load the prices data
if os.path.isfile('./prices_2019-20.pkl'):
load_prices('prices_2019-20.pkl')
###------------------------MATPLOTLIB PLOTTING SECTION------------------------###
# Create overview of trades in subplots. Create fig handle and axs 2D numpy array containing all 20 axes.
def overview_plots():
fig, axs = plt.subplots(nrows=4, ncols=6, figsize=(12, 6), tight_layout=True)
fig.suptitle(f'Historical Price Data for Investments Sold in XXXX-XX')
# Set accuracy of Tick labels to be used, depending on Buy-Sell time interval
monthYear = mdates.DateFormatter('%b-%y')
dayMonthYear = mdates.DateFormatter('%d/%m/%y')
# ax.flat is an attribute of ax that gives an iterator where the 4x6 array is flattened to a 1D array. Allows us to loop through.
for ax, (equity_name, equity) in zip(axs.flat, prices.items()):
if equity.get('Price History') is not None:
ax.set_title("\n".join(textwrap.wrap(equity_name, 45)), fontsize=6, wrap=True) # Use textwrap to split string according to the char limit (45), then join with /n.
ax.plot(equity['Price History'].index, equity['Price History']['Close'], color='blue', linewidth=1)
ax.tick_params(labelsize=4)
ax.set_xlabel('Date', fontsize=5)
ax.set_ylabel('Price', fontsize=5)
locator = MaxNLocator(nbins='auto') # Create an automatic tick spacer
numticks = LinearLocator(numticks=6) # Create a linear tick spacer of set no. of ticks
ax.yaxis.set_major_locator(locator)
ax.xaxis.set_major_locator(numticks)
# We use the 'Interval' column to determine what Tick formatting accuracy we should use on the graphs.
interval = equities_DF.loc[equity_name, 'Interval']
if isinstance(interval, pd.Series): # Where we have consecutive trades, we have 2 values in a series.
interval = equities_DF.loc[equity_name, 'Interval'][0]
if interval < pd.Timedelta(60, 'days'):
ax.xaxis.set_major_formatter(dayMonthYear)
ax.tick_params(axis='x', labelrotation=30)
else:
ax.xaxis.set_major_formatter(monthYear)
# Define buy and sold dates
bought_date = equities_DF.loc[equity_name, 'Bought']
sold_date = equities_DF.loc[equity_name, 'Sold']
if isinstance(bought_date, pd.Series):
bought_date = bought_date[0]
sold_date = sold_date[0]
# Try to annotate Buy and Sell arrows
bought_ycoord = prices[equity_name]['Price History'].loc[bought_date, 'Close']
sold_ycoord = prices[equity_name]['Price History'].loc[sold_date, 'Close']
if not pd.isna([bought_ycoord, sold_ycoord]).any():
ax.annotate('Bought', (bought_date, bought_ycoord), xycoords='data', fontsize=5, fontweight='semibold', color='orange', xytext=(-15, -25), textcoords='offset points', arrowprops={'arrowstyle': '->'})
ax.annotate('Sold', (sold_date, sold_ycoord), xycoords='data', fontsize=5, fontweight='semibold', color='red', xytext=(-15, -25), textcoords='offset points', arrowprops={'arrowstyle': '->'})
else:
pass
else:
continue
overview_plots()
##########################################################################################################
###------------------------------------------ PDF Production ------------------------------------------###
##########################################################################################################
# Using ReportLab, you can either lay out the PDF on a canvas, painting it with static content such as
# strings, lines, drawings, logos etc. Or you can use Flowables, which are a list of items or pieces of content that we want to add to the PDF.
# These are easily styled with margins, paragraph style etc., and so are great for report content that's used repeatedly.
# Flowables are appended one after the other, a bit like typing in a Word Doc. Whereas, static elements are drawn in a fixed location.
# Normally flowables are appended to the story list, which is then used to build the final PDF.
# Mixing static content and flowables can be a bit messy though. The way to do it is to use a PageTemplate, which draws the static
# content via its onPage callback and also holds a Frame for the flowables. You assign that template to the PDF before building it.
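# As a minimal, illustrative sketch of that pattern only (not executed here; 'example.pdf', on_page and the
# story contents are placeholder names, and the real setup for this report follows further below):
#
#   doc = SimpleDocTemplate('example.pdf')
#   frame = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height)
#   def on_page(canvas, doc):  # static content drawn at a fixed position on every page
#       canvas.drawString(30, 810, 'Page header')
#   doc.addPageTemplates(PageTemplate(frames=[frame], onPage=on_page))
#   styles = getSampleStyleSheet()
#   story = [Paragraph('Hello world', styles['Normal']), Spacer(1, 12)]  # flowables flow down the frame
#   doc.build(story)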
# First, define function that draws static content. i.e. content that is in the same position for every page.
# This function is used later in drawOn argument, and MUST include (canvas, doc) args
def draw_static_elements(canvas, pdf_doc):
canvas.saveState() # saveState saves current font, graphics transform for later recall by the next restoreState
# TrueType (.ttf) fonts are those used on Mac and PC systems, as opposed to Type1 fonts developed by Adobe in their PDFs.
# Must use a font with .ttc, .ttf, .otf format. ReportLab searches through your computer for them. 'Font Suitcase' not usable unfortunately
pdfmetrics.registerFont(TTFont('Euphemia', 'EuphemiaCAS.ttc'))
canvas.setFont('Euphemia', 10)
# Draw string at fixed location (top-left corner)
canvas.drawString(30, 810, f'Report generated on {pd.to_datetime("today"):%d/%m/%Y}')
# Reset font, graphic settings back to what they were before this function ran
canvas.restoreState()
# Define function to rescale drawing objects
def scale_to_fit(drawing, pdf_doc):
"""This function scales the drawing object to fit within the margin width of the pdf SampleDocTemplate"""
max_width = pdf_doc.width
scale_factor = max_width / drawing.width
    # Oddly, the width and height attributes don't change the drawn size by themselves, but they must still be updated to help the positioning during the pdf build.
drawing.width *= scale_factor
drawing.height *= scale_factor
drawing.scale(scale_factor, scale_factor) # This actually scales the image by changing transform attr. Two args: scale_x, scale_y
drawing.hAlign = 'RIGHT'
return drawing
class Line(Flowable): # Inherits attributes from Flowable class, so it can be appended to story.
def __init__(self, width, height=0): # Only need to specify width to draw a line.
Flowable.__init__(self)
self.width = width
self.height = height
def __repr__(self):
return f"Line with width={self.width}"
def draw(self):
"""Use canvas.line method. Provide two X,Y pairs for start and end of line."""
self.canv.line(0, self.height, self.width, self.height)
line = Line(438) # 438 is the approx width of the text in the PDF
# SET UP PDF READY FOR TAKING FIGURES #
# The simple doc template sets up our document. You can specify page size, margins etc
pdf = SimpleDocTemplate('Report Preview.pdf', topMargin=57, bottomMargin=35, author='<NAME>', showBoundary=False)
# Create Frame for holding flowables. Frame object is used by the platypus modules. Args: x, y (bottom left), width, height.
frame = Frame(pdf.leftMargin, pdf.bottomMargin, pdf.width, pdf.height, showBoundary=False)
# Add Frame to the page template and call on template to draw static objects
template = PageTemplate(frames=[frame], onPage=draw_static_elements)
# Add the template to the simple doc
pdf.addPageTemplates(template)
# Get the preset paragraph/text styles
styles = getSampleStyleSheet()
# TrueType (.ttf) fonts are those used on Mac and PC systems, as opposed to Type1 fonts developed by Adobe in their PDFs.
# Must use a font with .ttc, .ttf, .otf format. ReportLab searches through your computer for them. 'Font Suitcase' not usable unfortunately
pdfmetrics.registerFont(TTFont('Palatino Linotype', 'Palatino Linotype.ttf'))
# Create custom paragraph style
styles.add(ParagraphStyle(name='MainTitle', fontName='Palatino Linotype', underlineWidth=1, fontSize=16, alignment=TA_CENTER))
styles.add(ParagraphStyle(name='EquityHeading', fontName='Palatino Linotype', fontSize=12, alignment=TA_JUSTIFY))
styles.add(ParagraphStyle(name='Body', fontName='Palatino Linotype', fontSize=10, alignment=TA_JUSTIFY))
# Define story list for holding flowables
story = list()
# Add a paragraph to the pdf story with the title. </u> is XML for underline.
story.append(Paragraph('<u>HL Fund and Share Account Trades: Tax Year XXXX-XX</u>', style=styles['MainTitle']))
# Add a blank line. If font size is 10, then height=12 adds a blank line.
story.append(Spacer(5, 30))
# In loop below, recreate individual, larger figures for each equity.
# Set accuracy of Tick labels to be used, depending on Buy-Sell interval
monthYear = mdates.DateFormatter('%b-%y')
dayMonthYear = mdates.DateFormatter('%d-%b-%y')
# Create historical price plots. Each plot will be saved in-memory to BytesIO object to be put into PDF document
for equity_name, equity in prices.items():
if equity.get('Price History') is not None:
fig, ax = plt.subplots(figsize=(7, 4), tight_layout=True)
ax.plot(equity['Price History'].index, equity['Price History']['Close'], color='blue', linewidth=1)
ax.grid(color='lightgrey', linestyle='-', linewidth=0.5)
ax.tick_params(labelsize=8)
ax.set_xlabel('Date', fontsize=11)
ax.set_ylabel('Price', fontsize=11)
locator = MaxNLocator(nbins='auto')
numticks = LinearLocator(numticks=8)
ax.yaxis.set_major_locator(locator)
ax.xaxis.set_major_locator(numticks)
# Use the Interval column to determine what Tick formatting accuracy we should use on the graphs.
interval = equities_DF.loc[equity_name, 'Interval']
if isinstance(interval, pd.Series):
interval = equities_DF.loc[equity_name, 'Interval'][0]
if interval <
|
pd.Timedelta(60, 'days')
|
pandas.Timedelta
|
import datetime
import re
import csv
import numpy as np
import pandas as pd
import sklearn
from sklearn.metrics import accuracy_score, classification_report
from sklearn.metrics import confusion_matrix
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
from IMLearn.utils import split_train_test
# from __future__ import annotations
# from typing import NoReturn
from IMLearn.base import BaseEstimator
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
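# Helper functions below turn Agoda cancellation policy codes into numeric penalty features.
# Assumed format (inferred from the parsing logic, not from official documentation): a code is one or two
# clauses joined by "_", e.g. "30D1N" or "365D100P_100P". "<days>D<penalty>" means the penalty applies when
# cancelling within <days> days of check-in; the penalty is either "<n>N" (n nights' price) or "<p>P"
# (p percent of the full price). A trailing clause without "D" is treated as a no-show penalty.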
def make_condition_to_sum(cond: str, full_price: float,
night_price: float) -> float:
sum = 0
cond1 = re.split("D", cond)
days_before_checking = int(cond1[0])
if cond1[1].find("P") != -1:
percent = int(re.split("P", cond1[1])[0]) / 100
sum += full_price * percent * days_before_checking
else:
num_nights = int(re.split("N", cond1[1])[0])
sum += night_price * num_nights * days_before_checking
return sum
def f10(cancellation: str, full_price: float, night_price: float) -> (float, float):
if cancellation == "UNKNOWN":
return 0, 0
sum = 0
no_show = 0
cond = re.split("_", cancellation)
if len(cond) == 1:
sum += make_condition_to_sum(cond[0], full_price, night_price)
else:
sum += make_condition_to_sum(cond[0], full_price, night_price)
if cond[1].find("D") != -1:
sum += make_condition_to_sum(cond[1], full_price, night_price)
else:
if cond[1].find("P") != -1:
percent = int(re.split("P", cond[1])[0]) / 100
no_show += full_price * percent
else:
num_nights = int(re.split("N", cond[1])[0])
no_show += night_price * num_nights
return sum, no_show
def get_cancellation(features: pd.DataFrame):
sum = []
no_show = []
for index, row in features.iterrows():
a,b = f10(row.cancellation_policy_code, row.original_selling_amount, row.price_per_night)
sum.append(a)
no_show.append(b)
return sum, no_show
def load_data(filename: str, with_labels=True):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
        Path to the Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
# TODO - replace below code with any desired preprocessing
full_data =
|
pd.read_csv(filename)
|
pandas.read_csv
|
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from yitian.datasource import *
from yitian.datasource import preprocess
class Test(unittest.TestCase):
# def test_standardize_date(self):
# data_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['Trade Date', 'price'])
#
# expect_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['date', 'price'])
#
# assert_frame_equal(expect_pd, preprocess.standardize_date(data_pd))
#
# def test_standardize_date_with_multi_date_column(self):
# data_pd = pd.DataFrame([
# ['2019-01-01 00:00:00', '2019-01-01 00:00:00', 11.11],
# ['2019-01-02 00:00:00', '2019-01-01 00:00:00', 22.22],
# ['2019-01-03 00:00:00', '2019-01-01 00:00:00', 33.33],
# ['2019-01-04 00:00:00', '2019-01-01 00:00:00', 44.44],
# ], columns=['DATE', 'date', 'price'])
#
# with self.assertRaises(ValueError) as context:
# preprocess.standardize_date(data_pd)
#
# assert str(context.exception) == \
# str("Original cols ({cols}) cannot be reconnciled with date options ({option})"\
# .format(cols=data_pd.columns.tolist(), option=RAW_DATE_OPTIONS))
def test_create_ts_pd(self):
data_pd = pd.DataFrame([
['01/01/2019', 11.11],
['01/04/2019', 44.44],
['01/03/2019', 33.33],
['01/02/2019', 22.22]
], columns=['date', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-01-02'), 22.22],
[pd.Timestamp('2019-01-03'), 33.33],
[pd.Timestamp('2019-01-04'), 44.44]
], columns=['date', 'price']).set_index('date')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd))
def test_create_ts_pd_datetime(self):
data_pd = pd.DataFrame([
['2019-01-01 11:11:11', 11.11],
['2019-01-04 04:44:44', 44.44],
['2019-01-03 03:33:33', 33.33],
['2019-01-02 22:22:22', 22.22]
], columns=['datetime', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01 11:11:11'), 11.11],
[pd.Timestamp('2019-01-02 22:22:22'), 22.22],
[pd.Timestamp('2019-01-03 03:33:33'), 33.33],
[pd.Timestamp('2019-01-04 04:44:44'), 44.44]
], columns=['datetime', 'price']).set_index('datetime')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd, index_col=DATETIME))
def test_add_ymd(self):
data_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-02-02'), 22.22],
[pd.Timestamp('2019-03-03'), 33.33],
[
|
pd.Timestamp('2019-04-04')
|
pandas.Timestamp
|
"""
Author: <NAME>
Modified: <NAME>
"""
import os
import warnings
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from statsmodels.tools.sm_exceptions import EstimationWarning
from statsmodels.tsa.holtwinters import (ExponentialSmoothing,
SimpleExpSmoothing, Holt, SMOOTHERS, PY_SMOOTHERS)
base, _ = os.path.split(os.path.abspath(__file__))
housing_data = pd.read_csv(os.path.join(base, 'results', 'housing-data.csv'))
housing_data = housing_data.set_index('DATE')
housing_data = housing_data.asfreq('MS')
SEASONALS = ('add', 'mul', None)
TRENDS = ('add', 'mul', None)
def _simple_dbl_exp_smoother(x, alpha, beta, l0, b0, nforecast=0):
"""
Simple, slow, direct implementation of double exp smoothing for testing
"""
n = x.shape[0]
l = np.zeros(n)
b = np.zeros(n)
xhat = np.zeros(n)
f = np.zeros(nforecast)
l[0] = l0
b[0] = b0
# Special case the 0 observations since index -1 is not available
xhat[0] = l0 + b0
l[0] = alpha * x[0] + (1 - alpha) * (l0 + b0)
b[0] = beta * (l[0] - l0) + (1 - beta) * b0
for t in range(1, n):
# Obs in index t is the time t forecast for t + 1
l[t] = alpha * x[t] + (1 - alpha) * (l[t - 1] + b[t - 1])
b[t] = beta * (l[t] - l[t - 1]) + (1 - beta) * b[t - 1]
xhat[1:] = l[0:-1] + b[0:-1]
f[:] = l[-1] + np.arange(1, nforecast + 1) * b[-1]
err = x - xhat
return l, b, f, err, xhat
class TestHoltWinters(object):
@classmethod
def setup_class(cls):
# Changed for backwards compatibility with pandas
# oildata_oil_json = '{"851990400000":446.6565229,"883526400000":454.4733065,"915062400000":455.662974,"946598400000":423.6322388,"978220800000":456.2713279,"1009756800000":440.5880501,"1041292800000":425.3325201,"1072828800000":485.1494479,"1104451200000":506.0481621,"1135987200000":526.7919833,"1167523200000":514.268889,"1199059200000":494.2110193}'
# oildata_oil = pd.read_json(oildata_oil_json, typ='Series').sort_index()
data = [446.65652290000003, 454.47330649999998, 455.66297400000002,
423.63223879999998, 456.27132790000002, 440.58805009999998,
425.33252010000001, 485.14944789999998, 506.04816210000001,
526.79198329999997, 514.26888899999994, 494.21101929999998]
index = ['1996-12-31 00:00:00', '1997-12-31 00:00:00', '1998-12-31 00:00:00',
'1999-12-31 00:00:00', '2000-12-31 00:00:00', '2001-12-31 00:00:00',
'2002-12-31 00:00:00', '2003-12-31 00:00:00', '2004-12-31 00:00:00',
'2005-12-31 00:00:00', '2006-12-31 00:00:00', '2007-12-31 00:00:00']
oildata_oil = pd.Series(data, index)
oildata_oil.index = pd.DatetimeIndex(oildata_oil.index,
freq=
|
pd.infer_freq(oildata_oil.index)
|
pandas.infer_freq
|
from celery import Celery
import glob
import sys
import os
import uuid
import requests
import pandas as pd
import utils
from celery.signals import worker_ready
celery_instance = Celery('tasks', backend='redis://externalstructureproxy-redis', broker='pyamqp://guest@externalstructureproxy-rabbitmq//', )
@worker_ready.connect
def onstart(**k):
#_gnps_list = utils.load_GNPS(library_names=["GNPS-LIBRARY"])
_gnps_list = utils.load_GNPS()
_gnps_list = utils.gnps_format_libraries(_gnps_list)
gnps_df =
|
pd.DataFrame(_gnps_list)
|
pandas.DataFrame
|
# pylint: disable=E1101
from datetime import datetime
import os
import unittest
import warnings
import nose
import numpy as np
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import read_stata, StataReader, StataWriter
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.util.misc import is_little_endian
class StataTests(unittest.TestCase):
def setUp(self):
# Unit test datasets for dta7 - dta9 (old stata formats 104, 105 and 107) can be downloaded from:
# http://stata-press.com/data/glmext.html
self.dirpath = tm.get_data_path()
self.dta1 = os.path.join(self.dirpath, 'stata1.dta')
self.dta2 = os.path.join(self.dirpath, 'stata2.dta')
self.dta3 = os.path.join(self.dirpath, 'stata3.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4 = os.path.join(self.dirpath, 'stata4.dta')
self.dta5 = os.path.join(self.dirpath, 'stata5.dta')
self.dta6 = os.path.join(self.dirpath, 'stata6.dta')
self.dta7 = os.path.join(self.dirpath, 'cancer.dta')
self.csv7 = os.path.join(self.dirpath, 'cancer.csv')
self.dta8 = os.path.join(self.dirpath, 'tbl19-3.dta')
self.csv8 = os.path.join(self.dirpath, 'tbl19-3.csv')
self.dta9 = os.path.join(self.dirpath, 'lbw.dta')
self.csv9 = os.path.join(self.dirpath, 'lbw.csv')
self.dta10 = os.path.join(self.dirpath, 'stata10.dta')
def read_dta(self, file):
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_dta1(self):
reader = StataReader(self.dta1)
parsed = reader.data()
# Pandas uses np.nan as missing value. Thus, all columns will be of type float, regardless of their name.
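        # e.g. Series([1, np.nan]) has dtype float64, because np.nan is itself a float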
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss'])
for i, col in enumerate(parsed.columns):
np.testing.assert_almost_equal(
parsed[col],
expected[expected.columns[i]]
)
def test_read_dta2(self):
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('NaT')
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date', 'monthly_date', 'quarterly_date', 'half_yearly_date', 'yearly_date']
)
with warnings.catch_warnings(record=True) as w:
parsed = self.read_dta(self.dta2)
np.testing.assert_equal(
len(w), 1) # should get a warning for that format.
tm.assert_frame_equal(parsed, expected)
def test_read_dta3(self):
parsed = self.read_dta(self.dta3)
expected = self.read_csv(self.csv3)
for i, col in enumerate(parsed.columns):
np.testing.assert_almost_equal(
parsed[col],
expected[expected.columns[i]],
decimal=3
)
def test_read_dta4(self):
parsed = self.read_dta(self.dta4)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled', 'labeled_with_missings', 'float_labelled'])
tm.assert_frame_equal(parsed, expected)
def test_write_dta5(self):
if not is_little_endian():
raise nose.SkipTest("known failure of test_write_dta5 on non-little endian")
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss'])
original.index.name = 'index'
with ensure_clean(self.dta5) as path:
original.to_stata(path, None, False)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
def test_write_dta6(self):
if not is_little_endian():
raise nose.SkipTest("known failure of test_write_dta6 on non-little endian")
original = self.read_csv(self.csv3)
original.index.name = 'index'
with ensure_clean(self.dta6) as path:
original.to_stata(path, None, False)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), original)
@nose.tools.nottest
def test_read_dta7(self):
expected = read_csv(self.csv7, parse_dates=True, sep='\t')
parsed = self.read_dta(self.dta7)
for i, col in enumerate(parsed.columns):
np.testing.assert_almost_equal(
parsed[col],
expected[expected.columns[i]],
decimal=3
)
@nose.tools.nottest
def test_read_dta8(self):
expected = read_csv(self.csv8, parse_dates=True, sep='\t')
parsed = self.read_dta(self.dta8)
for i, col in enumerate(parsed.columns):
np.testing.assert_almost_equal(
parsed[col],
expected[expected.columns[i]],
decimal=3
)
@nose.tools.nottest
def test_read_dta9(self):
expected = read_csv(self.csv9, parse_dates=True, sep='\t')
parsed = self.read_dta(self.dta9)
for i, col in enumerate(parsed.columns):
            np.testing.assert_almost_equal(
parsed[col],
expected[expected.columns[i]],
decimal=3
)
def test_read_dta10(self):
if not is_little_endian():
raise nose.SkipTest("known failure of test_write_dta10 on non-little endian")
original = DataFrame(
data=
[
["string", "object", 1, 1.1, np.datetime64('2003-12-25')]
],
columns=['string', 'object', 'integer', 'float', 'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
with
|
ensure_clean(self.dta10)
|
pandas.util.testing.ensure_clean
|
import pandas as pd
import numpy as np
import random as rd
import math as mt
from pathlib import Path
from datetime import datetime as dt
import tensorflow as tf
from tensorflow.keras import Input
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping
def initGPU():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
def getData(path, targets, predictors, save=False, load=False, targetsCols=None, predictorsCols=None, centerBy=False, centerSize=False, centerSeed=1337):
def loadData():
return (
(pd.read_csv(path/(predictors+'_training.csv'), index_col=0), pd.read_csv(path/(predictors+'_validation.csv'), index_col=0)),
(
|
pd.read_csv(path/(targets+'_training.csv'), index_col=0)
|
pandas.read_csv
|
# Import libraries
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from datetime import date
import os
# Defining class:
class MovieDiary:
""" A class to keep track of movies watched,
including location and an evaluation of the movie.
Attributes:
diary: pandas dataframe that can be filled with different movies."""
def __init__(self):
self.diary = pd.DataFrame(columns=['date',
'movie',
'Where',
'Category',
'Evaluation'])
def create_diary(self):
"""Creates a diary in form of a pandas dataframe and stores it in csv-file."""
self.diary = pd.DataFrame(columns=['date',
'movie',
'Where',
'Category',
'Evaluation'])
self.diary.to_csv('my_movie_diary.csv')
def add_movie(self, movie, where, category, evaluation):
"""Function to add a new movie to an existing movie diary.
For now, the date column is always "today".
Args:
movie (string): The movie the user watched.
where (string): The location where the movie was watched.
category (string): The category of the movie watched, e.g. cinema, netflix.
evaluation (int): The evaluation the user gives the movie.
Returns:
The updated dataframe with the new movie
The csv file my_movie_diary.csv"""
try:
self.diary = pd.read_csv('my_movie_diary.csv', index_col=0)
new_entry = {'date': date.today(),
'movie': movie,
'Where': where,
'Category': category,
'Evaluation': evaluation}
self.diary = self.diary.append(new_entry, ignore_index=True)
self.diary.to_csv('my_movie_diary.csv')
return self.diary
except FileNotFoundError:
print('No diary found. Please run .create_diary() first.')
def delete_diary(self):
"""A function to delete the existing csv file that stores the diary."""
try:
os.remove('my_movie_diary.csv')
print('Diary deleted successfully.')
except FileNotFoundError:
print('There is no diary to delete left, you berserker.')
def plot_top_movies_time(self):
"""A function to plot the views of the 30 most viewed movies over time,
including the category and evaluation of the movie.
Returns:
Seaborn scatterplot"""
try:
movie_diary = pd.read_csv('my_movie_diary.csv', index_col=0)
top_movies_list = movie_diary.groupby('movie').date.count().reset_index().sort_values('date',
ascending=False).iloc[
:30]['movie']
movie_diary_top_movies =
|
pd.merge(movie_diary, top_movies_list, on='movie', how='inner')
|
pandas.merge
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_index_multi_index(self):
# GH#25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
)
result = df.set_index(list("abc")).sort_index(level=list("ba"))
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
)
expected = expected.set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered["A"])
df = unordered.copy()
return_value = df.sort_index(inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
assert a_id != id(df["A"])
df = unordered.copy()
return_value = df.sort_index(ascending=False, inplace=True)
assert return_value is None
expected = frame[::-1]
tm.assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ["D", "B", "C", "A"]]
df = unordered.copy()
return_value = df.sort_index(axis=1, inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
df = unordered.copy()
return_value = df.sort_index(axis=1, ascending=False, inplace=True)
assert return_value is None
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
# test with multiindex, too
idf = df.set_index(["A", "B"])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
tm.assert_frame_equal(result, expected)
# also, Series!
result = idf["C"].sort_index(ascending=[1, 0])
tm.assert_series_equal(result, expected["C"])
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level="A", sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["A", "B"], sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
# Error thrown by sort_index when
# first index is sorted last (GH#26053)
result = df.sort_index(level=["C", "B", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["B", "C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
def test_sort_index_categorical_index(self):
df = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
def test_sort_index(self):
# GH#13496
frame = DataFrame(
np.arange(16).reshape(4, 4),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
tm.assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
tm.assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
tm.assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
# GH#13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples(
[[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
)
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
)
expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
result = df.sort_index(level=level)
tm.assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
)
expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
tm.assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
)
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114
original_index = [2, 5, 3]
df = DataFrame(original_dict, index=original_index)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
False,
MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")),
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
False,
MultiIndex.from_tuples([[3, 4], [2, 1]], names=list("AB")),
),
],
)
def test_sort_index_ignore_index_multi_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
        # GH 30114, this is to test ignore_index on MultiIndex of index
mi = MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB"))
df = DataFrame(original_dict, index=mi)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=mi))
def test_sort_index_categorical_multiindex(self):
# GH#15058
df = DataFrame(
{
"a": range(6),
"l1": pd.Categorical(
["a", "a", "b", "b", "c", "c"],
categories=["c", "a", "b"],
ordered=True,
),
"l2": [0, 1, 0, 1, 0, 1],
}
)
result = df.set_index(["l1", "l2"]).sort_index()
expected = DataFrame(
[4, 5, 0, 1, 2, 3],
columns=["a"],
index=MultiIndex(
levels=[
CategoricalIndex(
["c", "a", "b"],
categories=["c", "a", "b"],
ordered=True,
name="l1",
dtype="category",
),
[0, 1],
],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=["l1", "l2"],
),
)
tm.assert_frame_equal(result, expected)
def test_sort_index_and_reconstruction(self):
# GH#15622
# lexsortedness should be identical
# across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list("ab"))
expected = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples(
[(0.5, "a"), (0.5, "b"), (0.8, "a"), (0.8, "b")]
),
)
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list("ab")]),
)
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(
levels=[[0.5, 0.8], ["a", "b"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# GH#14015
df = DataFrame(
[[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, "20160811 12:00:00"), (0, "20160809 12:00:00")],
names=["l1", "Date"],
),
)
df.columns = df.columns.set_levels(
pd.to_datetime(df.columns.levels[1]), level=1
)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
# TODO: better name, de-duplicate with test_sort_index_level above
def test_sort_index_level2(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
df = frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = frame["A"].sort_index(level=0)
# preserve names
assert a_sorted.index.names == frame.index.names
# inplace
rs = frame.copy()
return_value = rs.sort_index(level=0, inplace=True)
assert return_value is None
tm.assert_frame_equal(rs, frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# GH#2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# GH#2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
frame.index.names = ["first", "second"]
result = frame.sort_index(level="second")
expected = frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
sorted_before = frame.sort_index(level=1)
df = frame.copy()
df["foo"] = "bar"
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before, sorted_after.drop(["foo"], axis=1))
dft = frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft["foo", "three"] = "bar"
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(
sorted_before.drop([("foo", "three")], axis=1),
sorted_after.drop([("foo", "three")], axis=1),
)
def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.sort_index()
assert result.index.names == frame.index.names
@pytest.mark.parametrize(
"gen,extra",
[
([1.0, 3.0, 2.0, 5.0], 4.0),
([1, 3, 2, 5], 4),
(
[
Timestamp("20130101"),
Timestamp("20130103"),
Timestamp("20130102"),
Timestamp("20130105"),
],
Timestamp("20130104"),
),
(["1one", "3one", "2one", "5one"], "4one"),
],
)
def test_sort_index_multilevel_repr_8017(self, gen, extra):
np.random.seed(0)
data = np.random.randn(3, 4)
columns = MultiIndex.from_tuples([("red", i) for i in gen])
df = DataFrame(data, index=list("def"), columns=columns)
df2 = pd.concat(
[
df,
DataFrame(
"world",
index=list("def"),
columns=MultiIndex.from_tuples([("red", extra)]),
),
],
axis=1,
)
# check that the repr is good
# make sure that we have a correct sparsified repr
        # e.g. only 1 header of 'red'
assert str(df2).splitlines()[0].split() == ["red"]
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
tm.assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[("red", extra)] = "world"
result = result.sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"categories",
[
pytest.param(["a", "b", "c"], id="str"),
pytest.param(
[pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(2, 3)],
id="pd.Interval",
),
],
)
def test_sort_index_with_categories(self, categories):
# GH#23452
df = DataFrame(
{"foo": range(len(categories))},
index=CategoricalIndex(
data=categories, categories=categories, ordered=True
),
)
df.index = df.index.reorder_categories(df.index.categories[::-1])
result = df.sort_index()
expected = DataFrame(
{"foo": reversed(range(len(categories)))},
index=CategoricalIndex(
data=categories[::-1], categories=categories[::-1], ordered=True
),
)
tm.assert_frame_equal(result, expected)
class TestDataFrameSortIndexKey:
def test_sort_multi_index_key(self):
# GH 25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
).set_index(list("abc"))
result = df.sort_index(level=list("ac"), key=lambda x: x)
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=list("ac"), key=lambda x: -x)
expected = DataFrame(
{"a": [3, 2, 1], "b": [0, 0, 0], "c": [0, 2, 1], "d": list("acb")}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_key(self): # issue 27237
df = DataFrame(np.arange(6, dtype="int64"), index=list("aaBBca"))
result = df.sort_index()
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: x.str.lower())
expected = df.iloc[[0, 1, 5, 2, 3, 4]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: x.str.lower(), ascending=False)
expected = df.iloc[[4, 2, 3, 0, 1, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_index_key_int(self):
df = DataFrame(np.arange(6, dtype="int64"), index=np.arange(6, dtype="int64"))
result = df.sort_index()
tm.assert_frame_equal(result, df)
result = df.sort_index(key=lambda x: -x)
expected = df.sort_index(ascending=False)
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: 2 * x)
tm.assert_frame_equal(result, df)
def test_sort_multi_index_key_str(self):
# GH 25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": ["B", "a", "C"], "b": [0, 1, 0], "c": list("abc"), "d": [0, 1, 2]}
).set_index(list("abc"))
result = df.sort_index(level="a", key=lambda x: x.str.lower())
expected = DataFrame(
{"a": ["a", "B", "C"], "b": [1, 0, 0], "c": list("bac"), "d": [1, 0, 2]}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
result = df.sort_index(
level=list("abc"), # can refer to names
key=lambda x: x.str.lower() if x.name in ["a", "c"] else -x,
)
expected = DataFrame(
{"a": ["a", "B", "C"], "b": [1, 0, 0], "c": list("bac"), "d": [1, 0, 2]}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_changes_length_raises(self):
df =
|
DataFrame({"A": [1, 2, 3]})
|
pandas.DataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
        for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint16',
                      'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
            except AssertionError:
                raise AssertionError(
                    "invalid comparison [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
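        # e.g. nat_series_dtype_timestamp - NaT stays datetime64[ns]: the bare NaT
        # is treated as a missing timedelta being subtracted, not a missing datetime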
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # TODO: the reversed comparisons below (scalar on the left) do not
            # align correctly with NaN-filled object Series yet
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
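        # (& and | first align on the union of both indexes; a label present in
        # only one operand yields False in the result, as the cases below show)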
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
        # invalid axis
        msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
        assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
                            Series([False] * 4))
import argparse
from glob import glob
import joblib
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
def parse_arguments(parser):
parser.add_argument('--data_dir', type=str, default=None)
parser.add_argument('--output_dir', type=str, default=None)
parser.add_argument('--mode', type=str, default='test')
parser.add_argument('--test_file', type=str, default='test.tsv')
parser.add_argument('--text_only', type=bool, default=True)
parser.add_argument('--train_blender', type=bool, default=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parse_arguments(parser)
assert (args.data_dir)
# Import the real test data
test_df = pd.read_csv(args.data_dir + '/test.csv')
# Importing the event code dictionary to convert the BERT indices
code_df = pd.read_csv(args.data_dir + '/code_dict.csv')
code_dict = dict(zip(code_df.value, code_df.event_code))
# Importing the scores from the 4 BERT runs
if args.mode == 'validate':
run_folder = 'val_runs'
elif args.mode == 'test':
run_folder = 'test_runs'
prob_list = []
for fn in sorted(glob(args.output_dir + '/[0-9]')):
print(fn)
run_probs = np.array(
pd.read_csv(fn + '/test_results.tsv', sep='\t', header=None))
test_df['event'] = [
code_dict[code] for code in np.argmax(run_probs, axis=1)
]
test_df.to_csv(fn + '/solution.csv', header=True, index=False)
prob_list.append(run_probs)
assert (prob_list)
prob_list = np.array(prob_list)
# Grouping the probabilities for regular averaging
avg_probs = np.mean(prob_list, axis=0)
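    # prob_list has shape (n_runs, n_samples, n_classes), so averaging over axis 0
    # yields one blended probability vector per test example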
print(avg_probs)
assert (np.allclose(np.sum(avg_probs, axis=1), np.ones(test_df.shape[0])))
avg_guesses = np.array(
[code_dict[code] for code in np.argmax(avg_probs, axis=1)])
# Grouping the probabilities for blending
wide_probs = np.concatenate(prob_list, axis=1)
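    # wide_probs stacks the runs side by side, shape (n_samples, n_runs * n_classes):
    # one feature per (run, class) pair for the optional logistic-regression blender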
# Producing guesses when only the input text is available
if args.text_only:
# Loading the blender model
# lgr = joblib.load(args.data_dir + 'blender.joblib')
# blend_guesses = lgr.predict(wide_probs)
# blend_probs = np.max(lgr.predict_proba(wide_probs), axis=1)
# print(blend_probs[0])
# Exporting the guesses to disk
ids = pd.read_csv(args.data_dir + '/' + args.test_file, sep='\t')['id']
guess_df = pd.DataFrame(
pd.concat([
ids,
pd.Series(avg_guesses),
pd.Series(np.max(avg_probs, axis=1))
],
axis=1))
guess_df.columns = ['id', 'avg_guess', 'avg_prob']
guess_df.to_csv(args.output_dir + '/guesses.csv',
header=True,
index=False)
test_df['event'] = avg_guesses
test_df.to_csv(args.output_dir + '/solution.csv',
header=True,
index=False)
# Producing guesses and scores when the labels are also available
else:
# Getting the guesses from the blending model
if args.train_blender:
targets = pd.read_csv(args.data_dir + '/' +
args.test_file)['event']
lgr = LogisticRegression()
lgr.fit(wide_probs, targets)
joblib.dump(lgr, args.data_dir + 'blender.joblib')
else:
lgr = joblib.load(args.data_dir + 'blender.joblib')
blend_guesses = lgr.predict(wide_probs)
# Importing the test records and getting the various scores
        test_records = pd.read_csv(args.data_dir + '/' + args.test_file)
targets = np.array(test_records.event)
avg_f1 = f1_score(targets, avg_guesses, average='weighted')
blend_f1 = f1_score(targets, blend_guesses, average='weighted')
print('')
print('Weighted macro f1 on the test set is ' + str(avg_f1) +
' with averaging and ' + str(blend_f1) + ' with blending.')
# Writing results to disk
results = pd.DataFrame(
pd.concat([
test_records.id, test_records.text, test_records.event,
                pd.Series(avg_guesses)
            ], axis=1))
"""
This script is designed to perform table statistics
"""
import pandas as pd
import numpy as np
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
from Utils.lc_read_write_mat import read_mat
#%% ----------------------------------Our center 550----------------------------------
uid_path_550 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\selected_550.txt'
scale_path_550 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\10-24大表.xlsx'
scale_data_550 = pd.read_excel(scale_path_550)
uid_550 = pd.read_csv(uid_path_550, header=None)
scale_selected_550 = pd.merge(uid_550, scale_data_550, left_on=0, right_on='folder', how='inner')
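# The inner merge keeps only the 550 selected subject IDs (column 0 of the uid file)
# that have a matching 'folder' entry in the scale spreadsheet.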
from __future__ import division
from builtins import str
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import numpy as np
import pandas as pd
from .Error import NetworkInputError, NotImplementedError, UnexpectedError
from .Logger import FastTripsLogger
from .Util import Util
class Route(object):
"""
Route class.
One instance represents all of the Routes.
Stores route information in :py:attr:`Route.routes_df` and agency information in
:py:attr:`Route.agencies_df`. Each are instances of :py:class:`pandas.DataFrame`.
Fare information is in :py:attr:`Route.fare_attrs_df`, :py:attr:`Route.fare_rules_df` and
:py:attr:`Route.fare_transfer_rules_df`.
"""
#: File with fasttrips routes information (this extends the
#: `gtfs routes <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/routes.md>`_ file).
#: See `routes_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/routes_ft.md>`_.
INPUT_ROUTES_FILE = "routes_ft.txt"
#: gtfs Routes column name: Unique identifier
ROUTES_COLUMN_ROUTE_ID = "route_id"
#: gtfs Routes column name: Short name
ROUTES_COLUMN_ROUTE_SHORT_NAME = "route_short_name"
#: gtfs Routes column name: Long name
ROUTES_COLUMN_ROUTE_LONG_NAME = "route_long_name"
#: gtfs Routes column name: Route type
ROUTES_COLUMN_ROUTE_TYPE = "route_type"
#: gtfs Routes column name: Agency ID
ROUTES_COLUMN_AGENCY_ID = "agency_id"
#: fasttrips Routes column name: Mode
ROUTES_COLUMN_MODE = "mode"
#: fasttrips Routes column name: Proof of Payment
ROUTES_COLUMN_PROOF_OF_PAYMENT = "proof_of_payment"
# ========== Added by fasttrips =======================================================
#: fasttrips Routes column name: Mode number
ROUTES_COLUMN_ROUTE_ID_NUM = "route_id_num"
#: fasttrips Routes column name: Mode number
ROUTES_COLUMN_MODE_NUM = "mode_num"
#: fasttrips Routes column name: Mode type
ROUTES_COLUMN_MODE_TYPE = "mode_type"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: access
MODE_TYPE_ACCESS = "access"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: egress
MODE_TYPE_EGRESS = "egress"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: transit
MODE_TYPE_TRANSIT = "transit"
#: Value for :py:attr:`Route.ROUTES_COLUMN_MODE_TYPE` column: transfer
MODE_TYPE_TRANSFER = "transfer"
#: Access mode numbers start from here
MODE_NUM_START_ACCESS = 101
#: Egress mode numbers start from here
MODE_NUM_START_EGRESS = 201
#: Route mode numbers start from here
MODE_NUM_START_ROUTE = 301
    #: File with fasttrips fare attributes information (this *substitutes rather than extends* the
#: `gtfs fare_attributes <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_attributes_ft.md>`_ file).
#: See `fare_attributes_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_attributes_ft.md>`_.
INPUT_FARE_ATTRIBUTES_FILE = "fare_attributes_ft.txt"
# fasttrips Fare attributes column name: Fare Period
FARE_ATTR_COLUMN_FARE_PERIOD = "fare_period"
# fasttrips Fare attributes column name: Price
FARE_ATTR_COLUMN_PRICE = "price"
# fasttrips Fare attributes column name: Currency Type
FARE_ATTR_COLUMN_CURRENCY_TYPE = "currency_type"
# fasttrips Fare attributes column name: Payment Method
FARE_ATTR_COLUMN_PAYMENT_METHOD = "payment_method"
# fasttrips Fare attributes column name: Transfers (number permitted on this fare)
FARE_ATTR_COLUMN_TRANSFERS = "transfers"
# fasttrips Fare attributes column name: Transfer duration (Integer length of time in seconds before transfer expires. Omit or leave empty if they do not.)
FARE_ATTR_COLUMN_TRANSFER_DURATION = "transfer_duration"
#: File with fasttrips fare periods information
#: See `fare_rules_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_rules_ft.md>`_.
INPUT_FARE_PERIODS_FILE = "fare_periods_ft.txt"
#: fasttrips Fare rules column name: Fare ID
FARE_RULES_COLUMN_FARE_ID = "fare_id"
#: GTFS fare rules column name: Route ID
FARE_RULES_COLUMN_ROUTE_ID = ROUTES_COLUMN_ROUTE_ID
#: GTFS fare rules column name: Origin Zone ID
FARE_RULES_COLUMN_ORIGIN_ID = "origin_id"
#: GTFS fare rules column name: Destination Zone ID
FARE_RULES_COLUMN_DESTINATION_ID = "destination_id"
#: GTFS fare rules column name: Contains ID
FARE_RULES_COLUMN_CONTAINS_ID = "contains_id"
#: fasttrips Fare rules column name: Fare class
FARE_RULES_COLUMN_FARE_PERIOD = FARE_ATTR_COLUMN_FARE_PERIOD
#: fasttrips Fare rules column name: Start time for the fare. A DateTime
FARE_RULES_COLUMN_START_TIME = "start_time"
#: fasttrips Fare rules column name: End time for the fare rule. A DateTime.
FARE_RULES_COLUMN_END_TIME = "end_time"
# ========== Added by fasttrips =======================================================
#: fasttrips Fare rules column name: Fare ID num
FARE_RULES_COLUMN_FARE_ID_NUM = "fare_id_num"
#: fasttrips Fare rules column name: Route ID num
FARE_RULES_COLUMN_ROUTE_ID_NUM = ROUTES_COLUMN_ROUTE_ID_NUM
#: fasttrips fare rules column name: Origin Zone ID number
FARE_RULES_COLUMN_ORIGIN_ID_NUM = "origin_id_num"
#: fasttrips fare rules column name: Destination ID number
FARE_RULES_COLUMN_DESTINATION_ID_NUM = "destination_id_num"
#: File with fasttrips fare transfer rules information.
#: See `fare_transfer_rules specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_transfer_rules_ft.md>`_.
INPUT_FARE_TRANSFER_RULES_FILE = "fare_transfer_rules_ft.txt"
#: fasttrips Fare transfer rules column name: From Fare Class
FARE_TRANSFER_RULES_COLUMN_FROM_FARE_PERIOD = "from_fare_period"
#: fasttrips Fare transfer rules column name: To Fare Class
FARE_TRANSFER_RULES_COLUMN_TO_FARE_PERIOD = "to_fare_period"
#: fasttrips Fare transfer rules column name: Transfer type?
FARE_TRANSFER_RULES_COLUMN_TYPE = "transfer_fare_type"
#: fasttrips Fare transfer rules column name: Transfer amount (discount or fare)
FARE_TRANSFER_RULES_COLUMN_AMOUNT = "transfer_fare"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: transfer discount
TRANSFER_TYPE_TRANSFER_DISCOUNT = "transfer_discount"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: free transfer
TRANSFER_TYPE_TRANSFER_FREE = "transfer_free"
#: Value for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`: transfer fare cost
TRANSFER_TYPE_TRANSFER_COST = "transfer_cost"
#: Valid options for :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`
TRANSFER_TYPE_OPTIONS = [TRANSFER_TYPE_TRANSFER_DISCOUNT,
TRANSFER_TYPE_TRANSFER_FREE,
TRANSFER_TYPE_TRANSFER_COST]
#: File with route ID, route ID number correspondence (and fare id num)
OUTPUT_ROUTE_ID_NUM_FILE = "ft_intermediate_route_id.txt"
#: File with fare id num, fare id, fare class, price, xfers
OUTPUT_FARE_ID_FILE = "ft_intermediate_fare.txt"
#: File with fare transfer rules
OUTPUT_FARE_TRANSFER_FILE = "ft_intermediate_fare_transfers.txt"
#: File with mode, mode number correspondence
OUTPUT_MODE_NUM_FILE = "ft_intermediate_supply_mode_id.txt"
def __init__(self, input_archive, output_dir, gtfs, today, stops):
"""
Constructor. Reads the gtfs data from the transitfeed schedule, and the additional
fast-trips routes data from the input file in *input_archive*.
"""
self.output_dir = output_dir
self.routes_df = gtfs.routes
FastTripsLogger.info("Read %7d %15s from %25d %25s" %
(len(self.routes_df), 'date valid route', len(gtfs.routes), 'total routes'))
# Read the fast-trips supplemental routes data file
routes_ft_df = gtfs.get(Route.INPUT_ROUTES_FILE)
# verify required columns are present
routes_ft_cols = list(routes_ft_df.columns.values)
assert(Route.ROUTES_COLUMN_ROUTE_ID in routes_ft_cols)
assert(Route.ROUTES_COLUMN_MODE in routes_ft_cols)
# verify no routes_ids are duplicated
if routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID]).sum()>0:
error_msg = "Found %d duplicate %s in %s" % (routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID]).sum(),
Route.ROUTES_COLUMN_ROUTE_ID, Route.INPUT_ROUTES_FILE)
FastTripsLogger.fatal(error_msg)
FastTripsLogger.fatal("\nDuplicates:\n%s" % \
str(routes_ft_df.loc[routes_ft_df.duplicated(subset=[Route.ROUTES_COLUMN_ROUTE_ID])]))
raise NetworkInputError(Route.INPUT_ROUTES_FILE, error_msg)
# Join to the routes dataframe
self.routes_df = pd.merge(left=self.routes_df, right=routes_ft_df,
how='left',
on=Route.ROUTES_COLUMN_ROUTE_ID)
# Get the mode list
self.modes_df = self.routes_df[[Route.ROUTES_COLUMN_MODE]].drop_duplicates().reset_index(drop=True)
self.modes_df[Route.ROUTES_COLUMN_MODE_NUM] = self.modes_df.index + Route.MODE_NUM_START_ROUTE
self.modes_df[Route.ROUTES_COLUMN_MODE_TYPE] = Route.MODE_TYPE_TRANSIT
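        # modes_df now holds one row per distinct transit mode, numbered consecutively
        # from MODE_NUM_START_ROUTE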
# Join to mode numbering
self.routes_df = Util.add_new_id(self.routes_df, Route.ROUTES_COLUMN_MODE, Route.ROUTES_COLUMN_MODE_NUM,
self.modes_df, Route.ROUTES_COLUMN_MODE, Route.ROUTES_COLUMN_MODE_NUM)
# Route IDs are strings. Create a unique numeric route ID.
self.route_id_df = Util.add_numeric_column(self.routes_df[[Route.ROUTES_COLUMN_ROUTE_ID]],
id_colname=Route.ROUTES_COLUMN_ROUTE_ID,
numeric_newcolname=Route.ROUTES_COLUMN_ROUTE_ID_NUM)
FastTripsLogger.debug("Route ID to number correspondence\n" + str(self.route_id_df.head()))
FastTripsLogger.debug(str(self.route_id_df.dtypes))
self.routes_df = self.add_numeric_route_id(self.routes_df,
id_colname=Route.ROUTES_COLUMN_ROUTE_ID,
numeric_newcolname=Route.ROUTES_COLUMN_ROUTE_ID_NUM)
FastTripsLogger.debug("=========== ROUTES ===========\n" + str(self.routes_df.head()))
FastTripsLogger.debug("\n"+str(self.routes_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s, %25s" %
(len(self.routes_df), "routes", "routes.txt", Route.INPUT_ROUTES_FILE))
self.agencies_df = gtfs.agency
FastTripsLogger.debug("=========== AGENCIES ===========\n" + str(self.agencies_df.head()))
FastTripsLogger.debug("\n"+str(self.agencies_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.agencies_df), "agencies", "agency.txt"))
self.fare_attrs_df = gtfs.fare_attributes
FastTripsLogger.debug("=========== FARE ATTRIBUTES ===========\n" + str(self.fare_attrs_df.head()))
FastTripsLogger.debug("\n"+str(self.fare_attrs_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.fare_attrs_df), "fare attributes", "fare_attributes.txt"))
        # substitute fasttrips fare attributes
self.fare_attrs_df = gtfs.get(Route.INPUT_FARE_ATTRIBUTES_FILE)
if not self.fare_attrs_df.empty:
# verify required columns are present
fare_attrs_cols = list(self.fare_attrs_df.columns.values)
assert(Route.FARE_ATTR_COLUMN_FARE_PERIOD in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_PRICE in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_CURRENCY_TYPE in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_PAYMENT_METHOD in fare_attrs_cols)
assert(Route.FARE_ATTR_COLUMN_TRANSFERS in fare_attrs_cols)
if Route.FARE_ATTR_COLUMN_TRANSFER_DURATION not in fare_attrs_cols:
self.fare_attrs_df[Route.FARE_ATTR_COLUMN_TRANSFER_DURATION] = np.nan
FastTripsLogger.debug("===> REPLACED BY FARE ATTRIBUTES FT\n" + str(self.fare_attrs_df.head()))
FastTripsLogger.debug("\n"+str(self.fare_attrs_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.fare_attrs_df), "fare attributes", Route.INPUT_FARE_ATTRIBUTES_FILE))
#: fares are by fare_period rather than by fare_id
self.fare_by_class = True
else:
self.fare_by_class = False
# Fare rules (map routes to fare_id)
self.fare_rules_df = gtfs.fare_rules
if len(self.fare_rules_df) > 0:
self.fare_ids_df = Util.add_numeric_column(self.fare_rules_df[[Route.FARE_RULES_COLUMN_FARE_ID]],
id_colname=Route.FARE_RULES_COLUMN_FARE_ID,
numeric_newcolname=Route.FARE_RULES_COLUMN_FARE_ID_NUM)
self.fare_rules_df = pd.merge(left =self.fare_rules_df,
right =self.fare_ids_df,
how ="left")
else:
self.fare_ids_df = pd.DataFrame()
# optionally reverse those with origin/destinations if configured
from .Assignment import Assignment
if Assignment.FARE_ZONE_SYMMETRY:
FastTripsLogger.debug("applying FARE_ZONE_SYMMETRY to %d fare rules" % len(self.fare_rules_df))
# select only those with an origin and destination
reverse_fare_rules = self.fare_rules_df.loc[ pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID])&
pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID]) ].copy()
# FastTripsLogger.debug("reverse_fare_rules 1 head()=\n%s" % str(reverse_fare_rules.head()))
# reverse them
reverse_fare_rules.rename(columns={Route.FARE_RULES_COLUMN_ORIGIN_ID : Route.FARE_RULES_COLUMN_DESTINATION_ID,
Route.FARE_RULES_COLUMN_DESTINATION_ID : Route.FARE_RULES_COLUMN_ORIGIN_ID},
inplace=True)
# FastTripsLogger.debug("reverse_fare_rules 2 head()=\n%s" % str(reverse_fare_rules.head()))
# join them to eliminate dupes
reverse_fare_rules = pd.merge(left =reverse_fare_rules,
right =self.fare_rules_df,
how ="left",
on =[Route.FARE_RULES_COLUMN_FARE_ID,
Route.FARE_RULES_COLUMN_FARE_ID_NUM,
Route.FARE_RULES_COLUMN_ROUTE_ID,
Route.FARE_RULES_COLUMN_ORIGIN_ID,
Route.FARE_RULES_COLUMN_DESTINATION_ID,
Route.FARE_RULES_COLUMN_CONTAINS_ID],
indicator=True)
# dupes exist in both -- drop those
reverse_fare_rules = reverse_fare_rules.loc[ reverse_fare_rules["_merge"]=="left_only"]
reverse_fare_rules.drop(["_merge"], axis=1, inplace=True)
# add them to fare rules
self.fare_rules_df = pd.concat([self.fare_rules_df, reverse_fare_rules])
FastTripsLogger.debug("fare rules with symmetry %d head()=\n%s" % (len(self.fare_rules_df), str(self.fare_rules_df.head())))
# sort by fare ID num so zone-to-zone and their reverse are together
if len(self.fare_rules_df) > 0:
self.fare_rules_df.sort_values(by=[Route.FARE_RULES_COLUMN_FARE_ID_NUM], inplace=True)
fare_rules_ft_df = gtfs.get(Route.INPUT_FARE_PERIODS_FILE)
if not fare_rules_ft_df.empty:
# verify required columns are present
fare_rules_ft_cols = list(fare_rules_ft_df.columns.values)
assert(Route.FARE_RULES_COLUMN_FARE_ID in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_FARE_PERIOD in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_START_TIME in fare_rules_ft_cols)
assert(Route.FARE_RULES_COLUMN_END_TIME in fare_rules_ft_cols)
# Split fare classes so they don't overlap
fare_rules_ft_df = self.remove_fare_period_overlap(fare_rules_ft_df)
# join to fare rules dataframe
self.fare_rules_df = pd.merge(left=self.fare_rules_df, right=fare_rules_ft_df,
how='left',
on=Route.FARE_RULES_COLUMN_FARE_ID)
# add route id numbering if applicable
if Route.FARE_RULES_COLUMN_ROUTE_ID in list(self.fare_rules_df.columns.values):
self.fare_rules_df = self.add_numeric_route_id(self.fare_rules_df,
Route.FARE_RULES_COLUMN_ROUTE_ID,
Route.FARE_RULES_COLUMN_ROUTE_ID_NUM)
# add origin zone numbering if applicable
if (Route.FARE_RULES_COLUMN_ORIGIN_ID in list(self.fare_rules_df.columns.values)) and \
(pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID]).sum() > 0):
self.fare_rules_df = stops.add_numeric_stop_zone_id(self.fare_rules_df,
Route.FARE_RULES_COLUMN_ORIGIN_ID,
Route.FARE_RULES_COLUMN_ORIGIN_ID_NUM)
# add destination zone numbering if applicable
if (Route.FARE_RULES_COLUMN_DESTINATION_ID in list(self.fare_rules_df.columns.values)) and \
(pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID]).sum() > 0):
self.fare_rules_df = stops.add_numeric_stop_zone_id(self.fare_rules_df,
Route.FARE_RULES_COLUMN_DESTINATION_ID,
Route.FARE_RULES_COLUMN_DESTINATION_ID_NUM)
# They should both be present
# This is unlikely
if Route.FARE_RULES_COLUMN_ORIGIN_ID not in list(self.fare_rules_df.columns.values):
error_str = "Fast-trips only supports both origin_id and destination_id or neither in fare rules"
FastTripsLogger.fatal(error_str)
raise NotImplementedError(error_str)
# check for each row, either both are present or neither -- use xor, or ^
xor_id = self.fare_rules_df.loc[ pd.isnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID])^
pd.isnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID]) ]
if len(xor_id) > 0:
error_str = "Fast-trips supports fare rules with both origin id and destination id specified, or neither ONLY.\n%s" % str(xor_id)
FastTripsLogger.fatal(error_str)
raise NotImplementedError(error_str)
# We don't support contains_id
if Route.FARE_RULES_COLUMN_CONTAINS_ID in list(self.fare_rules_df.columns.values):
non_null_contains_id = self.fare_rules_df.loc[pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_CONTAINS_ID])]
if len(non_null_contains_id) > 0:
error_str = "Fast-trips does not support contains_id in fare rules:\n%s" % str(non_null_contains_id)
FastTripsLogger.fatal(error_str)
raise NotImplementedError(error_str)
# We don't support rows with only one of origin_id or destination_id specified
elif len(self.fare_rules_df) > 0:
# we have fare rules but no fare periods -- make the fare periods the same
self.fare_rules_df[Route.FARE_RULES_COLUMN_FARE_PERIOD] = self.fare_rules_df[Route.FARE_RULES_COLUMN_FARE_ID]
self.fare_rules_df[Route.FARE_RULES_COLUMN_START_TIME] = Util.read_time("00:00:00")
self.fare_rules_df[Route.FARE_RULES_COLUMN_END_TIME ] = Util.read_time("24:00:00")
# join to fare_attributes on fare_period if we have it, or fare_id if we don't
if len(self.fare_rules_df) > 0:
"""
Fare ID/class (fare period)/attribute mapping.
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
| *Column name* | Column Description |
+=======================+======================================================================================================================================+
|``fare_id`` |GTFS fare_id (See `fare_rules`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``fare_id_num`` |Numbered fare_id |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``route_id`` |(optional) Route(s) associated with this fare ID. (See `fare_rules`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``origin_id`` |(optional) Origin fare zone ID(s) for fare ID. (See `fare_rules`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``origin_id_num`` |(optional) Origin fare zone number for fare ID. |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``destination_id`` |(optional) Destination fare zone ID(s) for fare ID. (See `fare_rules`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``destination_id_num`` |(optional) Destination fare zone number for fare ID. |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``contains_id`` |(optional) Contains fare zone ID(s) for fare ID. (See `fare_rules`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``fare_period`` |GTFS-plus fare_period (See `fare_periods_ft`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``start_time`` |Fare class start time (See `fare_rules_ft`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``end_time`` |Fare class end time (See `fare_rules_ft`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``currency_type`` |Currency of fare class or id (See `fare_attributes`_ or `fare_attributes_ft`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``price`` |Price of fare class or id (See `fare_attributes`_ or `fare_attributes_ft`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``payment_method`` |When the fare must be paid (See `fare_attributes`_ or `fare_attributes_ft`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
            |``transfers``          |Number of transfers permitted on this fare (See `fare_attributes`_ or `fare_attributes_ft`_)                                          |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|``transfer_duration`` |(optional) Integer length of time in seconds before transfer expires (See `fare_attributes`_ or `fare_attributes_ft`_) |
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------+
.. _fare_rules: https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_rules.md
.. _fare_rules_ft: https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_rules_ft.md
.. _fare_attributes: https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_attributes.md
.. _fare_attributes_ft: https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/fare_attributes_ft.md
"""
self.fare_rules_df = pd.merge(left =self.fare_rules_df,
right=self.fare_attrs_df,
how ='left',
on = Route.FARE_RULES_COLUMN_FARE_PERIOD if self.fare_by_class else Route.FARE_RULES_COLUMN_FARE_ID)
FastTripsLogger.debug("=========== FARE RULES ===========\n" + str(self.fare_rules_df.head(10).to_string(formatters=\
{Route.FARE_RULES_COLUMN_START_TIME:Util.datetime64_formatter,
Route.FARE_RULES_COLUMN_END_TIME :Util.datetime64_formatter})))
FastTripsLogger.debug("\n"+str(self.fare_rules_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s, %25s" %
(len(self.fare_rules_df), "fare rules", "fare_rules.txt", self.INPUT_FARE_PERIODS_FILE))
self.fare_transfer_rules_df = gtfs.get(Route.INPUT_FARE_TRANSFER_RULES_FILE)
if not self.fare_transfer_rules_df.empty:
# verify required columns are present
fare_transfer_rules_cols = list(self.fare_transfer_rules_df.columns.values)
assert(Route.FARE_TRANSFER_RULES_COLUMN_FROM_FARE_PERIOD in fare_transfer_rules_cols)
assert(Route.FARE_TRANSFER_RULES_COLUMN_TO_FARE_PERIOD in fare_transfer_rules_cols)
assert(Route.FARE_TRANSFER_RULES_COLUMN_TYPE in fare_transfer_rules_cols)
assert(Route.FARE_TRANSFER_RULES_COLUMN_AMOUNT in fare_transfer_rules_cols)
# verify valid values for transfer type
invalid_type = self.fare_transfer_rules_df.loc[ self.fare_transfer_rules_df[Route.FARE_TRANSFER_RULES_COLUMN_TYPE].isin(Route.TRANSFER_TYPE_OPTIONS)==False ]
if len(invalid_type) > 0:
error_msg = "Invalid value for %s:\n%s" % (Route.FARE_TRANSFER_RULES_COLUMN_TYPE, str(invalid_type))
FastTripsLogger.fatal(error_msg)
raise NetworkInputError(Route.INPUT_FARE_TRANSFER_RULES_FILE, error_msg)
# verify the amount is positive
negative_amount = self.fare_transfer_rules_df.loc[ self.fare_transfer_rules_df[Route.FARE_TRANSFER_RULES_COLUMN_AMOUNT] < 0]
if len(negative_amount) > 0:
error_msg = "Negative transfer amounts are invalid:\n%s" % str(negative_amount)
FastTripsLogger.fatal(error_msg)
raise NetworkInputError(Route.INPUT_FARE_TRANSFER_RULES_FILE, error_msg)
FastTripsLogger.debug("=========== FARE TRANSFER RULES ===========\n" + str(self.fare_transfer_rules_df.head()))
FastTripsLogger.debug("\n"+str(self.fare_transfer_rules_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.fare_transfer_rules_df), "fare xfer rules", Route.INPUT_FARE_TRANSFER_RULES_FILE))
else:
self.fare_transfer_rules_df = pd.DataFrame()
self.write_routes_for_extension()
def add_numeric_route_id(self, input_df, id_colname, numeric_newcolname):
"""
Passing a :py:class:`pandas.DataFrame` with a route ID column called *id_colname*,
adds the numeric route id as a column named *numeric_newcolname* and returns it.
"""
return Util.add_new_id(input_df, id_colname, numeric_newcolname,
mapping_df=self.route_id_df,
mapping_id_colname=Route.ROUTES_COLUMN_ROUTE_ID,
mapping_newid_colname=Route.ROUTES_COLUMN_ROUTE_ID_NUM)
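# A hypothetical usage sketch (the instance and dataframe names below are assumptions, not
# from the original code): given a dataframe of trip links with a route_id column, this call
# would append the numeric equivalent using the route_id_df mapping built at load time.
#   trip_links_df = routes.add_numeric_route_id(trip_links_df,
#                                               Route.ROUTES_COLUMN_ROUTE_ID,
#                                               Route.ROUTES_COLUMN_ROUTE_ID_NUM)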
def add_access_egress_modes(self, access_modes_df, egress_modes_df):
"""
Adds access and egress modes to the mode list
Writes out mapping to disk
"""
access_modes_df[Route.ROUTES_COLUMN_MODE_TYPE] = Route.MODE_TYPE_ACCESS
egress_modes_df[Route.ROUTES_COLUMN_MODE_TYPE] = Route.MODE_TYPE_EGRESS
implicit_modes_df = pd.DataFrame({Route.ROUTES_COLUMN_MODE_TYPE: [Route.MODE_TYPE_TRANSFER],
Route.ROUTES_COLUMN_MODE: [Route.MODE_TYPE_TRANSFER],
Route.ROUTES_COLUMN_MODE_NUM: [ 1]})
self.modes_df = pd.concat([implicit_modes_df,
self.modes_df,
access_modes_df,
egress_modes_df], axis=0)
self.modes_df.reset_index(inplace=True)
# write intermediate files
self.modes_df.to_csv(os.path.join(self.output_dir, Route.OUTPUT_MODE_NUM_FILE),
columns=[Route.ROUTES_COLUMN_MODE_NUM, Route.ROUTES_COLUMN_MODE],
sep=" ", index=False)
FastTripsLogger.debug("Wrote %s" % os.path.join(self.output_dir, Route.OUTPUT_MODE_NUM_FILE))
def add_numeric_mode_id(self, input_df, id_colname, numeric_newcolname, warn=False):
"""
Passing a :py:class:`pandas.DataFrame` with a mode ID column called *id_colname*,
adds the numeric mode id as a column named *numeric_newcolname* and returns it.
"""
return Util.add_new_id(input_df, id_colname, numeric_newcolname,
mapping_df=self.modes_df[[Route.ROUTES_COLUMN_MODE_NUM, Route.ROUTES_COLUMN_MODE]],
mapping_id_colname=Route.ROUTES_COLUMN_MODE,
mapping_newid_colname=Route.ROUTES_COLUMN_MODE_NUM,
warn=warn)
def remove_fare_period_overlap(self, fare_rules_ft_df):
"""
Split fare classes so they don't overlap
"""
fare_rules_ft_df["fare_period_id"] = fare_rules_ft_df.index+1
# FastTripsLogger.debug("remove_fare_period_overlap: initial\n%s" % fare_rules_ft_df)
max_fare_period_id = fare_rules_ft_df["fare_period_id"].max()
loop_iters = 0
while True:
# join with itself to see if any are contained
df = pd.merge(left =fare_rules_ft_df,
right=fare_rules_ft_df,
on =Route.FARE_RULES_COLUMN_FARE_ID,
how ="outer")
# if there's one fare period per fare id, nothing to do
if len(df)==len(fare_rules_ft_df):
FastTripsLogger.debug("One fare period per fare id, no need to split")
return fare_rules_ft_df
# remove dupes
df = df.loc[ df["fare_period_id_x"] != df["fare_period_id_y"] ]
FastTripsLogger.debug("remove_fare_period_overlap:\n%s" % df)
# this is an overlap -- error
# ____y_______ x starts after y starts
# ______x______ x starts before y ends
# x ends after y ends
intersecting_fare_periods = df.loc[ (df["start_time_x"]>df["start_time_y"])& \
(df["start_time_x"]<df["end_time_y"])& \
(df["end_time_x" ]>df["end_time_y"]) ]
if len(intersecting_fare_periods) > 0:
error_msg = "Partially overlapping fare periods are ambiguous. \n%s" % str(intersecting_fare_periods)
FastTripsLogger.error(error_msg)
raise NetworkInputError(Route.INPUT_FARE_PERIODS_FILE, error_msg)
# is x a subset of y?
# ___x___ x starts after y starts
# ______y_______ x ends before y ends
subset_fare_periods = df.loc[ (df["start_time_x"]>=df["start_time_y"])& \
(df["end_time_x" ]<=df["end_time_y"]) ]
# if no subsets, done -- return
if len(subset_fare_periods) == 0:
FastTripsLogger.debug("remove_fare_period_overlap returning\n%s" % fare_rules_ft_df)
return fare_rules_ft_df
# do one at a time -- split first into three rows
FastTripsLogger.debug("splitting\n%s" % str(subset_fare_periods))
row_dict = subset_fare_periods.head(1).to_dict(orient="records")[0]
FastTripsLogger.debug(row_dict)
y_1 = {'fare_id' :row_dict['fare_id'],
'fare_period' :row_dict['fare_period_y'],
'start_time' :row_dict['start_time_y'],
'end_time' :row_dict['start_time_x'],
'fare_period_id' :row_dict['fare_period_id_y']}
x = {'fare_id' :row_dict['fare_id'],
'fare_period' :row_dict['fare_period_x'],
'start_time' :row_dict['start_time_x'],
'end_time' :row_dict['end_time_x'],
'fare_period_id' :row_dict['fare_period_id_x']}
y_2 = {'fare_id' :row_dict['fare_id'],
'fare_period' :row_dict['fare_period_y'],
'start_time' :row_dict['end_time_x'],
'end_time' :row_dict['end_time_y'],
'fare_period_id' :max_fare_period_id+1} # new
max_fare_period_id += 1
new_df = pd.DataFrame([y_1,x,y_2])
FastTripsLogger.debug("\n%s" % str(new_df))
# put it together with the unaffected fare periods we already had
prev_df = fare_rules_ft_df.loc[ (fare_rules_ft_df["fare_period_id"]!=row_dict["fare_period_id_x"])&
(fare_rules_ft_df["fare_period_id"]!=row_dict["fare_period_id_y"]) ]
fare_rules_ft_df = pd.concat([prev_df, new_df], axis=0)
# sort by fare_id, start_time
fare_rules_ft_df.sort_values([Route.FARE_RULES_COLUMN_FARE_ID,
Route.FARE_RULES_COLUMN_START_TIME], inplace=True)
# reorder columns
fare_rules_ft_df = fare_rules_ft_df[[Route.FARE_RULES_COLUMN_FARE_ID,
Route.FARE_RULES_COLUMN_FARE_PERIOD,
"fare_period_id",
Route.FARE_RULES_COLUMN_START_TIME,
Route.FARE_RULES_COLUMN_END_TIME]]
FastTripsLogger.debug("\n%s" % str(fare_rules_ft_df))
loop_iters += 1
# don't loop forever -- there's a problem
if loop_iters > 5:
error_str = "Route.remove_fare_period_overlap looping too much! Something is wrong."
FastTripsLogger.critical(error_str)
raise UnexpectedError(error_str)
# this shouldn't happen
FastTripsLogger.warn("This shouldn't happen")
def write_routes_for_extension(self):
"""
Write to an intermediate formatted file for the C++ extension.
Since there are strings involved, it's easier than passing it to the extension.
"""
from .Assignment import Assignment
# write intermediate file -- route id num, route id
self.route_id_df[[Route.ROUTES_COLUMN_ROUTE_ID_NUM, Route.ROUTES_COLUMN_ROUTE_ID]].to_csv(
os.path.join(self.output_dir, Route.OUTPUT_ROUTE_ID_NUM_FILE), sep=" ", index=False)
FastTripsLogger.debug("Wrote %s" % os.path.join(self.output_dir, Route.OUTPUT_ROUTE_ID_NUM_FILE))
# write fare file
if len(self.fare_rules_df) > 0:
# copy for writing
fare_rules_df = self.fare_rules_df.copy()
# replace with float versions
fare_rules_df[Route.FARE_RULES_COLUMN_START_TIME] = (fare_rules_df[Route.FARE_RULES_COLUMN_START_TIME] - Assignment.NETWORK_BUILD_DATE_START_TIME)/np.timedelta64(1,'m')
fare_rules_df[Route.FARE_RULES_COLUMN_END_TIME ] = (fare_rules_df[Route.FARE_RULES_COLUMN_END_TIME ] - Assignment.NETWORK_BUILD_DATE_START_TIME)/np.timedelta64(1,'m')
# fillna with -1
for num_col in [Route.FARE_RULES_COLUMN_ROUTE_ID_NUM, Route.FARE_RULES_COLUMN_ORIGIN_ID_NUM, Route.FARE_RULES_COLUMN_DESTINATION_ID_NUM, Route.FARE_ATTR_COLUMN_TRANSFERS]:
if num_col in list(fare_rules_df.columns.values):
fare_rules_df.loc[ pd.isnull(fare_rules_df[num_col]), num_col] = -1
fare_rules_df[num_col] = fare_rules_df[num_col].astype(int)
else:
fare_rules_df[num_col] = -1
# temp column: duration; sort by this so the smallest duration is found first
fare_rules_df["duration"] = fare_rules_df[Route.FARE_RULES_COLUMN_END_TIME ] - fare_rules_df[Route.FARE_RULES_COLUMN_START_TIME]
fare_rules_df.sort_values(by=[Route.FARE_RULES_COLUMN_FARE_ID_NUM,"duration"], ascending=True, inplace=True)
# transfer_duration fillna with -1
fare_rules_df.fillna({Route.FARE_ATTR_COLUMN_TRANSFER_DURATION:-1}, inplace=True)
# File with fare id num, fare id, fare class, price, xfers
fare_rules_df.to_csv(os.path.join(self.output_dir, Route.OUTPUT_FARE_ID_FILE),
columns=[Route.FARE_RULES_COLUMN_FARE_ID_NUM,
Route.FARE_RULES_COLUMN_FARE_ID,
Route.FARE_ATTR_COLUMN_FARE_PERIOD,
Route.FARE_RULES_COLUMN_ROUTE_ID_NUM,
Route.FARE_RULES_COLUMN_ORIGIN_ID_NUM,
Route.FARE_RULES_COLUMN_DESTINATION_ID_NUM,
Route.FARE_RULES_COLUMN_START_TIME,
Route.FARE_RULES_COLUMN_END_TIME,
Route.FARE_ATTR_COLUMN_PRICE,
Route.FARE_ATTR_COLUMN_TRANSFERS,
Route.FARE_ATTR_COLUMN_TRANSFER_DURATION],
sep=" ", index=False)
FastTripsLogger.debug("Wrote %s" % os.path.join(self.output_dir, Route.OUTPUT_FARE_ID_FILE))
if len(self.fare_transfer_rules_df) > 0:
# File with fare transfer rules
self.fare_transfer_rules_df.to_csv(os.path.join(self.output_dir, Route.OUTPUT_FARE_TRANSFER_FILE),
sep=" ", index=False)
FastTripsLogger.debug("Wrote %s" % os.path.join(self.output_dir, Route.OUTPUT_FARE_TRANSFER_FILE))
else:
FastTripsLogger.debug("No fare rules so no file %s" % os.path.join(self.output_dir, Route.OUTPUT_FARE_ID_FILE))
def add_fares(self, trip_links_df):
"""
Adds (or replaces) fare columns to the given :py:class:`pandas.DataFrame`.
New columns are
* :py:attr:`Assignment.SIM_COL_PAX_FARE`
* :py:attr:`Assignment.SIM_COL_PAX_FARE_PERIOD`
* :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_FROM_FARE_PERIOD`
* :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_TYPE`
* :py:attr:`Route.FARE_TRANSFER_RULES_COLUMN_AMOUNT`
* :py:attr:`Assignment.SIM_COL_PAX_FREE_TRANSFER`
"""
FastTripsLogger.info(" Adding fares to pathset")
from .Assignment import Assignment
if Assignment.SIM_COL_PAX_FARE in list(trip_links_df.columns.values):
trip_links_df.drop([Assignment.SIM_COL_PAX_FARE,
Assignment.SIM_COL_PAX_FARE_PERIOD,
Route.FARE_TRANSFER_RULES_COLUMN_FROM_FARE_PERIOD,
Route.FARE_TRANSFER_RULES_COLUMN_TYPE,
Route.FARE_TRANSFER_RULES_COLUMN_AMOUNT,
Assignment.SIM_COL_PAX_FREE_TRANSFER], axis=1, inplace=True)
# no fares configured
if len(self.fare_rules_df) == 0:
trip_links_df[Assignment.SIM_COL_PAX_FARE ] = 0
trip_links_df[Assignment.SIM_COL_PAX_FARE_PERIOD ] = None
trip_links_df[Route.FARE_TRANSFER_RULES_COLUMN_FROM_FARE_PERIOD] = None
trip_links_df[Route.FARE_TRANSFER_RULES_COLUMN_TYPE ] = None
trip_links_df[Route.FARE_TRANSFER_RULES_COLUMN_AMOUNT ] = None
trip_links_df[Assignment.SIM_COL_PAX_FREE_TRANSFER ] = None
return trip_links_df
orig_columns = list(trip_links_df.columns.values)
fare_columns = [Assignment.SIM_COL_PAX_FARE,
Assignment.SIM_COL_PAX_FARE_PERIOD]
transfer_columns = [Route.FARE_TRANSFER_RULES_COLUMN_FROM_FARE_PERIOD,
Route.FARE_TRANSFER_RULES_COLUMN_TYPE,
Route.FARE_TRANSFER_RULES_COLUMN_AMOUNT,
Assignment.SIM_COL_PAX_FREE_TRANSFER]
# give them a unique index and store it for later
trip_links_df.reset_index(drop=True, inplace=True)
trip_links_df["trip_links_df index"] = trip_links_df.index
num_trip_links = len(trip_links_df)
FastTripsLogger.debug("add_fares initial trips (%d):\n%s" % (num_trip_links, str(trip_links_df.head(20))))
FastTripsLogger.debug("add_fares initial fare_rules (%d):\n%s" % (len(self.fare_rules_df), str(self.fare_rules_df.head(20))))
# initialize
trip_links_unmatched = trip_links_df
trip_links_matched = pd.DataFrame()
del trip_links_df
from .Passenger import Passenger
# level 0: match on all three
fare_rules0 = self.fare_rules_df.loc[pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ROUTE_ID ])&
pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID ])&
pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID])]
if len(fare_rules0) > 0:
trip_links_match0 = pd.merge(left =trip_links_unmatched,
right =fare_rules0,
how ="inner",
left_on =[Route.FARE_RULES_COLUMN_ROUTE_ID,"A_zone_id","B_zone_id"],
right_on =[Route.FARE_RULES_COLUMN_ROUTE_ID,Route.FARE_RULES_COLUMN_ORIGIN_ID,Route.FARE_RULES_COLUMN_DESTINATION_ID],
suffixes =["","_fare_rules"])
# delete rows where the board time is not within the fare period
trip_links_match0 = trip_links_match0.loc[ pd.isnull(trip_links_match0[Route.FARE_ATTR_COLUMN_PRICE])|
((trip_links_match0[Assignment.SIM_COL_PAX_BOARD_TIME] >= trip_links_match0[Route.FARE_RULES_COLUMN_START_TIME])&
(trip_links_match0[Assignment.SIM_COL_PAX_BOARD_TIME] < trip_links_match0[Route.FARE_RULES_COLUMN_END_TIME])) ]
FastTripsLogger.debug("add_fares level 0 (%d):\n%s" % (len(trip_links_match0), str(trip_links_match0.head(20))))
if len(trip_links_match0) > 0:
# update matched and unmatched == they should be disjoint with union = whole
trip_links_unmatched = pd.merge(left =trip_links_unmatched,
right=trip_links_match0[[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM]],
how ="left",
on =[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM],
indicator=True)
trip_links_unmatched = trip_links_unmatched.loc[ trip_links_unmatched["_merge"] == "left_only" ]
trip_links_unmatched.drop(["_merge"], axis=1, inplace=True)
trip_links_matched = pd.concat([trip_links_matched, trip_links_match0], axis=0, copy=False)
FastTripsLogger.debug("matched: %d unmatched: %d total: %d" % (len(trip_links_matched), len(trip_links_unmatched), len(trip_links_matched)+len(trip_links_unmatched)))
del trip_links_match0
# TODO - Adding a stopgap solution: if there are duplicates, drop them,
# but there's probably a better way to handle this, like flagging it in the input.
# See https://app.asana.com/0/15582794263969/319659099709517
# trip_links_matched["dupe"] = trip_links_matched.duplicated(subset="trip_links_df index")
# FastTripsLogger.debug("dupes: \n%s" % trip_links_matched.loc[trip_links_matched["dupe"]==True].sort_values(by="trip_links_df index"))
trip_links_matched.drop_duplicates(subset="trip_links_df index", keep="first", inplace=True)
# level 1: match on route only
fare_rules1 = self.fare_rules_df.loc[pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ROUTE_ID ])&
pd.isnull (self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID ])&
pd.isnull (self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID])]
if len(fare_rules1) > 0:
trip_links_match1 = pd.merge(left =trip_links_unmatched,
right =fare_rules1,
how ="inner",
on =Route.FARE_RULES_COLUMN_ROUTE_ID,
suffixes =["","_fare_rules"])
# delete rows where the board time is not within the fare period
trip_links_match1 = trip_links_match1.loc[ pd.isnull(trip_links_match1[Route.FARE_ATTR_COLUMN_PRICE])|
((trip_links_match1[Assignment.SIM_COL_PAX_BOARD_TIME] >= trip_links_match1[Route.FARE_RULES_COLUMN_START_TIME])&
(trip_links_match1[Assignment.SIM_COL_PAX_BOARD_TIME] < trip_links_match1[Route.FARE_RULES_COLUMN_END_TIME])) ]
FastTripsLogger.debug("add_fares level 1 (%d):\n%s" % (len(trip_links_match1), str(trip_links_match1.head())))
if len(trip_links_match1) > 0:
# update matched and unmatched == they should be disjoint with union = whole
trip_links_unmatched = pd.merge(left =trip_links_unmatched,
right=trip_links_match1[[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM]],
how ="left",
on =[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM],
indicator=True)
trip_links_unmatched = trip_links_unmatched.loc[ trip_links_unmatched["_merge"] == "left_only" ]
trip_links_unmatched.drop(["_merge"], axis=1, inplace=True)
trip_links_matched = pd.concat([trip_links_matched, trip_links_match1], axis=0, copy=False)
FastTripsLogger.debug("matched: %d unmatched: %d total: %d" % (len(trip_links_matched), len(trip_links_unmatched), len(trip_links_matched)+len(trip_links_unmatched)))
del trip_links_match1
# level 2: match on origin and destination zones only
fare_rules2 = self.fare_rules_df.loc[pd.isnull (self.fare_rules_df[Route.FARE_RULES_COLUMN_ROUTE_ID ])&
pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_ORIGIN_ID ])&
pd.notnull(self.fare_rules_df[Route.FARE_RULES_COLUMN_DESTINATION_ID])]
if len(fare_rules2) > 0:
trip_links_match2 = pd.merge(left =trip_links_unmatched,
right =fare_rules2,
how ="inner",
left_on =["A_zone_id","B_zone_id"],
right_on =[Route.FARE_RULES_COLUMN_ORIGIN_ID,Route.FARE_RULES_COLUMN_DESTINATION_ID],
suffixes =["","_fare_rules"])
# delete rows where the board time is not within the fare period
trip_links_match2 = trip_links_match2.loc[ pd.isnull(trip_links_match2[Route.FARE_ATTR_COLUMN_PRICE])|
((trip_links_match2[Assignment.SIM_COL_PAX_BOARD_TIME] >= trip_links_match2[Route.FARE_RULES_COLUMN_START_TIME])&
(trip_links_match2[Assignment.SIM_COL_PAX_BOARD_TIME] < trip_links_match2[Route.FARE_RULES_COLUMN_END_TIME])) ]
FastTripsLogger.debug("add_fares level 2 (%d):\n%s" % (len(trip_links_match2), str(trip_links_match2.head())))
if len(trip_links_match2) > 0:
# update matched and unmatched == they should be disjoint with union = whole
trip_links_unmatched = pd.merge(left =trip_links_unmatched,
right=trip_links_match2[[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM]],
how ="left",
on =[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM],
indicator=True)
trip_links_unmatched = trip_links_unmatched.loc[ trip_links_unmatched["_merge"] == "left_only" ]
trip_links_unmatched.drop(["_merge"], axis=1, inplace=True)
trip_links_matched =
|
pd.concat([trip_links_matched, trip_links_match2], axis=0, copy=False)
|
pandas.concat
|
import faiss
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from tqdm.auto import tqdm
import pandas as pd
import scipy
import pickle
import json
import os
import numpy as np
from collections import Counter
import re
from datasets import (
Dataset,
load_from_disk,
concatenate_datasets,
)
from konlpy.tag import Mecab
import numpy as np
from tqdm import tqdm, trange
import argparse
import random
import torch
import os
import torch.nn.functional as F
from transformers import BertModel, BertPreTrainedModel, AdamW, TrainingArguments, get_linear_schedule_with_warmup, AutoTokenizer
import pickle
from rank_bm25 import BM25Okapi, BM25Plus, BM25L, BM25
import time
from contextlib import contextmanager
import math
from multiprocessing import Pool, cpu_count
@contextmanager
def timer(name):
t0 = time.time()
yield
print(f'[{name}] done in {time.time() - t0:.3f} s')
ban_words=("이따금","아마","절대로","무조건","한때","대략","오직",
"오로지","감히","최소","아예","반드시","꼭","때때로","이미"
,"종종","졸곧","약간","기꺼이", "비록","꾸준히","일부러","어쩔", "문득", "어쨌든", "순전히", "필수")
mecab = Mecab()
class ES(BM25):
def __init__(self, corpus, tokenizer=None, k1=1.2, b=0.75, delta=0):
# Algorithm specific parameters
self.k1 = k1
self.b = b
self.delta = delta
super().__init__(corpus, tokenizer)
def _calc_idf(self, nd):
for word, freq in nd.items():
idf = math.log(1 + ((self.corpus_size + 0.5 - freq) / (freq+0.5)))
self.idf[word] = idf
def get_scores(self, query):
score = np.zeros(self.corpus_size)
doc_len = np.array(self.doc_len)
for q in query:
q_freq = np.array([(doc.get(q) or 0) for doc in self.doc_freqs])
score += (self.idf.get(q) or 0) * (self.delta + (q_freq) /
(self.k1 * (1 - self.b + self.b * doc_len / self.avgdl) + q_freq))
return score
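# For reference, the score assembled by _calc_idf and get_scores above is, per query term q
# with in-document frequency tf:
#   score(q, d) = log(1 + (N - df_q + 0.5) / (df_q + 0.5))
#                 * (delta + tf / (k1 * (1 - b + b * |d| / avgdl) + tf))
# where N is the corpus size, df_q the number of documents containing q, |d| the document
# length and avgdl the average document length (all bookkeeping comes from the rank_bm25
# BM25 base class). With delta > 0 this behaves like a BM25+-style variant.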
def remove_q(query):
stop = "|".join(
"어느 무엇인가요 무엇 누가 누구인가요 누구인가 누구 어디에서 어디에 어디서 어디인가요 어디를 어디 언제 어떤 어떠한 몇 얼마 얼마나 뭐 어떻게 무슨 \?".split(
" "
)
)
rm = re.sub(stop, "", query).strip()
return rm
class BertEncoder(BertPreTrainedModel):
def __init__(self, config):
super(BertEncoder, self).__init__(config)
self.bert = BertModel(config)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None):
outputs = self.bert(input_ids,attention_mask=attention_mask,token_type_ids=token_type_ids)
pooled_output = outputs[1]
return pooled_output
'''
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
model_checkpoint="bert-base-multilingual-cased"
# load pre-trained model on cuda (if available)
p_encoder = BertEncoder.from_pretrained(model_checkpoint).cuda()
q_encoder = BertEncoder.from_pretrained(model_checkpoint).cuda()
p_encoder.load_state_dict(torch.load("./p_encoder_fin.pth"))
q_encoder.load_state_dict(torch.load("./q_encoder_fin.pth"))
'''
def to_cuda(batch):
return tuple(t.cuda() for t in batch)
class DenseRetrieval:
def __init__(self, tokenize_fn, data_path="./data/", context_path="wikipedia_documents.json"):
self.data_path = data_path
self.wiki_embs = None
with open(os.path.join(data_path, context_path), "r") as f:
wiki = json.load(f)
self.contexts = list(dict.fromkeys([v['text'] for v in wiki.values()]))
pickle_name = f"./data/dense_embedding.bin"
if os.path.isfile(pickle_name):
with open(pickle_name,"rb") as file:
self.wiki_embs=pickle.load(file)
print("Pre")
else:
with torch.no_grad():
self.wiki_embs = []
for text in self.contexts:
p = tokenizer(text, padding="max_length", truncation=True, return_tensors='pt').to('cuda')
wiki_emb = p_encoder(**p).to('cpu').numpy()
self.wiki_embs.append(wiki_emb)
self.wiki_embs = torch.Tensor(self.wiki_embs).squeeze() # (num_passage, emb_dim)
with open(pickle_name,"wb") as file:
pickle.dump(self.wiki_embs,file)
self.f=open("./track.txt","w")
def retrieve(self, query_or_dataset, topk=1):
if isinstance(query_or_dataset, str):
doc_scores, doc_indices = self.get_relevant_doc(query_or_dataset, k=topk)
return doc_scores, doc_indices
elif isinstance(query_or_dataset, Dataset):
# make retrieved result as dataframe
total = []
super_count=0
doc_scores, doc_indices = self.get_relevant_doc_bulk(query_or_dataset['question'], k=5)
for idx, example in enumerate(tqdm(query_or_dataset, desc="Dense retrieval: ")):
tmp = {
"question": example["question"],
"id": example['id'],
#"context_id": doc_indices[idx][0], # retrieved id
"context": " ".join(self.contexts[doc_indices[idx][i]] for i in range(5)) # retrieved doument
}
if 'context' in example.keys() and 'answers' in example.keys():
tmp["original_context"] = example['context'] # original document
tmp["answers"] = example['answers'] # original answer
total.append(tmp)
cqas = pd.DataFrame(total)
return cqas
def get_relevant_doc(self, query, k=3):
with torch.no_grad():
q_seqs_val = tokenizer([query], padding="max_length", truncation=True, return_tensors='pt').to('cuda')
q_emb = q_encoder(**q_seqs_val).to('cpu') #(num_query, emb_dim)
result = torch.matmul(q_emb, torch.transpose(self.wiki_embs, 0, 1))
rank = torch.argsort(result, dim=1, descending=True).squeeze()
self.f.write("=============\n")
self.f.write("Query "+query+"\n")
for i in range(k):
self.f.write(str(i)+self.contexts[rank[i]][:75]+"\n")
print(result.squeeze()[rank].tolist()[:k], rank.tolist()[:k])
return result.squeeze()[rank].tolist()[:k], rank.tolist()[:k]
def get_relevant_doc_bulk(self, queries, k=1):
doc_scores = []
doc_indices = []
for query in queries:
ret0,ret1=self.get_relevant_doc(query,k)
doc_scores.append(ret0)
doc_indices.append(ret1)
self.f.close()
return doc_scores, doc_indices
class SparseRetrieval:
def __init__(self, tokenize_fn, data_path="./data/", context_path="wikipedia_documents.json"):
self.data_path = data_path
with open(os.path.join(data_path, context_path), "r") as f:
wiki = json.load(f)
self.contexts = list(dict.fromkeys([v['text'] for v in wiki.values()])) # use dict.fromkeys rather than set because set iteration order changes between runs
print(f"Lengths of unique contexts : {len(self.contexts)}")
self.ids = list(range(len(self.contexts)))
# Transform by vectorizer
self.tfidfv = TfidfVectorizer(
tokenizer=tokenize_fn,
ngram_range=(1, 2),
max_features=50000,
)
# should run get_sparse_embedding() or build_faiss() first.
self.p_embedding = None
self.indexer = None
def get_sparse_embedding(self):
# Pickle save.
pickle_name = f"sparse_embedding.bin"
tfidfv_name = f"tfidv.bin"
emd_path = os.path.join(self.data_path, pickle_name)
tfidfv_path = os.path.join(self.data_path, tfidfv_name)
if os.path.isfile(emd_path) and os.path.isfile(tfidfv_path):
with open(emd_path, "rb") as file:
self.p_embedding = pickle.load(file)
with open(tfidfv_path, "rb") as file:
self.tfidfv = pickle.load(file)
print("Embedding pickle load.")
else:
print("Build passage embedding")
self.p_embedding = self.tfidfv.fit_transform(self.contexts)
print(self.p_embedding.shape)
with open(emd_path, "wb") as file:
pickle.dump(self.p_embedding, file)
with open(tfidfv_path, "wb") as file:
pickle.dump(self.tfidfv, file)
print("Embedding pickle saved.")
def retrieve(self, query_or_dataset, topk=3):
assert self.p_embedding is not None, "You must build the sparse embedding with self.get_sparse_embedding() before you run self.retrieve()."
if isinstance(query_or_dataset, str):
doc_scores, doc_indices = self.get_relevant_doc(query_or_dataset, k=topk)
return doc_scores, doc_indices
elif isinstance(query_or_dataset, Dataset):
# make retrieved result as dataframe
total = []
doc_scores, doc_indices = self.get_relevant_doc_bulk(query_or_dataset['question'], k=20)
for idx, example in enumerate(tqdm(query_or_dataset, desc="Sparse retrieval: ")):
tmp = {
"question": example["question"],
"id": example['id'],
#"context_id": doc_indices[idx][0], # retrieved id
"context": " ".join(self.contexts[doc_indices[idx][i]] for i in range(20)) # retrieved doument
}
if 'context' in example.keys() and 'answers' in example.keys():
tmp["original_context"] = example['context'] # original document
tmp["answers"] = example['answers'] # original answer
total.append(tmp)
cqas = pd.DataFrame(total)
return cqas
def get_relevant_doc(self, query, k=1):
"""
Note: querying with odd words that are not in the vocab triggers the assertion below (e.g. 뙣뙇?)
"""
with timer("transform"):
query_vec = self.tfidfv.transform([query])
assert (
np.sum(query_vec) != 0
), "오류가 발생했습니다. 이 오류는 보통 query에 vectorizer의 vocab에 없는 단어만 존재하는 경우 발생합니다."
with timer("query ex search"):
result = query_vec * self.p_embedding.T
if not isinstance(result, np.ndarray):
result = result.toarray()
sorted_result = np.argsort(result.squeeze())[::-1]
return result.squeeze()[sorted_result].tolist()[:k], sorted_result.tolist()[:k]
def get_relevant_doc_bulk(self, queries, k=1):
query_vec = self.tfidfv.transform(queries)
assert (
np.sum(query_vec) != 0
), "오류가 발생했습니다. 이 오류는 보통 query에 vectorizer의 vocab에 없는 단어만 존재하는 경우 발생합니다."
result = query_vec * self.p_embedding.T
if not isinstance(result, np.ndarray):
result = result.toarray()
doc_scores = []
doc_indices = []
for i in range(result.shape[0]):
sorted_result = np.argsort(result[i, :])[::-1]
doc_scores.append(result[i, :][sorted_result].tolist()[:k])
doc_indices.append(sorted_result.tolist()[:k])
return doc_scores, doc_indices
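# A minimal usage sketch (the tokenizer choice is an assumption, not prescribed by the
# original code):
#   retriever = SparseRetrieval(tokenize_fn=mecab.morphs)
#   retriever.get_sparse_embedding()                 # fit or load the TF-IDF matrix
#   scores, indices = retriever.retrieve("example query", topk=3)
#   top_passages = [retriever.contexts[i] for i in indices]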
class BM25:
def __init__(self, tokenize_fn, data_path="./data/", context_path="wikipedia_documents.json", k1=1.2, b=0.25):
self.data_path = data_path
with open(os.path.join(data_path, context_path), "r") as f:
wiki = json.load(f)
self.contexts = list(dict.fromkeys([v['text'] for v in wiki.values()])) # use dict.fromkeys rather than set because set iteration order changes between runs
self.l=[]
for v in self.contexts:
self.l.append(len(mecab.morphs(v)))
self.l=np.array(self.l)
print("Avg",np.average(self.l))
self.l=self.l/np.average(self.l)
self.l=(k1*(1-b+b*self.l)).astype(np.float32)
print("L SHAP",self.l.shape)
print(f"Lengths of unique contexts : {len(self.contexts)}")
self.ids = list(range(len(self.contexts)))
# Transform by vectorizer
self.tfidfv = CountVectorizer(
tokenizer=tokenize_fn,
ngram_range=(1, 2),
max_features=50000,
)
# should run get_sparse_embedding() or build_faiss() first.
self.p_embedding = None
self.a=None
def get_sparse_embedding(self):
# Pickle save.
pickle_name = f"bm_embedding.bin"
tfidfv_name = f"bm.bin"
emd_path = os.path.join(self.data_path, pickle_name)
tfidfv_path = os.path.join(self.data_path, tfidfv_name)
if os.path.isfile(emd_path) and os.path.isfile(tfidfv_path):
with open(emd_path, "rb") as file:
self.p_embedding = pickle.load(file)
with open(tfidfv_path, "rb") as file:
self.tfidfv = pickle.load(file)
print("Embedding pickle load.")
else:
print("Build passage embedding")
self.p_embedding = self.tfidfv.fit_transform(self.contexts)
print(self.p_embedding.shape)
with open(emd_path, "wb") as file:
pickle.dump(self.p_embedding, file)
with open(tfidfv_path, "wb") as file:
pickle.dump(self.tfidfv, file)
print("Embedding pickle saved.")
self.a=self.p_embedding
z1=(len(self.contexts)-self.a.count_nonzero()+0.5)
z2=(self.a.count_nonzero()+0.5)
#z1=(len(self.contexts)-np.count_nonzero(self.a>0,axis=0)+0.5)
#z2=(np.count_nonzero(self.a>0,axis=0)+0.5)
z1=np.log(1+z1/z2)
del(z2)
self.a=(z1*((self.a)/(self.a+self.l[:,np.newaxis])))
print(self.a)
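# At this point self.a is intended to hold a per-(document, term) partial BM25 score,
#   idf(t) * tf / (tf + k1 * (1 - b + b * len(d) / avgdl))
# so that a query's score can be read off by summing the columns of its terms. Note that, as
# written, count_nonzero() above is taken over the whole sparse matrix (a single scalar),
# while the commented-out lines suggest per-term document frequencies (axis=0) were the
# intent.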
def retrieve(self, query_or_dataset, topk=1):
assert self.p_embedding is not None, "You must build the sparse embedding with self.get_sparse_embedding() before you run self.retrieve()."
if isinstance(query_or_dataset, str):
doc_scores, doc_indices = self.get_relevant_doc(query_or_dataset, k=topk)
print("[Search query]\n", query_or_dataset, "\n")
for i in range(topk):
print("Top-%d passage with score %.4f" % (i + 1, doc_scores[i]))
print(self.contexts[doc_indices[i]])
return doc_scores, [self.contexts[doc_indices[i]] for i in range(topk)]
elif isinstance(query_or_dataset, Dataset):
# make retrieved result as dataframe
total = []
with timer("query exhaustive search"):
doc_scores, doc_indices = self.get_relevant_doc_bulk(query_or_dataset['question'], k=1)
for idx, example in enumerate(tqdm(query_or_dataset, desc="Sparse retrieval: ")):
# relev_doc_ids = [el for i, el in enumerate(self.ids) if i in doc_indices[idx]]
tmp = {
"question": example["question"],
"id": example['id'],
"context_id": doc_indices[idx][0], # retrieved id
"context": self.contexts[doc_indices[idx][0]] # retrieved doument
}
if 'context' in example.keys() and 'answers' in example.keys():
tmp["original_context"] = example['context'] # original document
tmp["answers"] = example['answers'] # original answer
total.append(tmp)
cqas =
|
pd.DataFrame(total)
|
pandas.DataFrame
|
import sys
import os.path
import numpy as np
import pandas
from Sloth.cluster import KMeans
from sklearn.cluster import KMeans as sk_kmeans
from tslearn.datasets import CachedDatasets
from d3m.primitive_interfaces.base import PrimitiveBase, CallResult
from d3m import container, utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
from common_primitives import utils as utils_cp, dataset_to_dataframe as DatasetToDataFrame, dataframe_utils, denormalize
__author__ = 'Distil'
__version__ = '2.0.5'
__contact__ = 'mailto:<EMAIL>'
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Params(params.Params):
pass
class Hyperparams(hyperparams.Hyperparams):
algorithm = hyperparams.Enumeration(default = 'TimeSeriesKMeans',
semantic_types = ['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
values = ['GlobalAlignmentKernelKMeans', 'TimeSeriesKMeans'],
description = 'type of clustering algorithm to use')
nclusters = hyperparams.UniformInt(lower=1, upper=sys.maxsize, default=3, semantic_types=
['https://metadata.datadrivendiscovery.org/types/TuningParameter'], description = 'number of clusters \
to use in the kernel k-means algorithm')
n_init = hyperparams.UniformInt(lower=1, upper=sys.maxsize, default=10, semantic_types=
['https://metadata.datadrivendiscovery.org/types/TuningParameter'], description = 'Number of times the k-means algorithm \
will be run with different centroid seeds. Final result will be the best output on n_init consecutive runs in terms of inertia')
pass
class Storc(PrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
"""
Primitive that applies kmeans clustering to time series data. Algorithm options are 'GlobalAlignmentKernelKMeans'
or 'TimeSeriesKMeans,' both of which are bootstrapped from the base library tslearn.clustering. This is an unsupervised,
clustering primitive, but has been represented as a supervised classification problem to produce a compliant primitive.
Training inputs: D3M dataset with features and labels, and D3M indices
Outputs: D3M dataset with predicted labels and D3M indices
"""
metadata = metadata_base.PrimitiveMetadata({
# Simply an UUID generated once and fixed forever. Generated using "uuid.uuid4()".
'id': "77bf4b92-2faa-3e38-bb7e-804131243a7f",
'version': __version__,
'name': "Sloth",
# Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.
'keywords': ['Time Series','Clustering'],
'source': {
'name': __author__,
'contact': __contact__,
'uris': [
# Unstructured URIs.
"https://github.com/NewKnowledge/D3M-Unsupervised",
],
},
# A list of dependencies in order. These can be Python packages, system packages, or Docker images.
# Of course Python packages can also have their own dependencies, but sometimes it is necessary to
# install a Python package first to be even able to run setup.py of another package. Or you have
# a dependency which is not on PyPi.
'installation': [
{
'type': metadata_base.PrimitiveInstallationType.PIP,
'package': 'cython',
'version': '0.29.7',
},
{
'type': metadata_base.PrimitiveInstallationType.PIP,
'package_uri': 'git+https://github.com/NewKnowledge/D3M-Unsupervised.git@{git_commit}#egg=D3MUnsupervised'.format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),)
}
],
# The same path the primitive is registered with entry points in setup.py.
'python_path': 'd3m.primitives.clustering.k_means.Sloth',
# Choose these from a controlled vocabulary in the schema. If anything is missing which would
# best describe the primitive, make a merge request.
'algorithm_types': [
metadata_base.PrimitiveAlgorithmType.K_MEANS_CLUSTERING,
],
'primitive_family': metadata_base.PrimitiveFamily.CLUSTERING,
})
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0)-> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
self._X_train = None # training inputs
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
'''
fits Kmeans clustering algorithm using training data from set_training_data and hyperparameters
'''
self._kmeans.fit(self._X_train)
return CallResult(None)
def get_params(self) -> Params:
return self._params
def set_params(self, *, params: Params) -> None:
self._params = params
def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:
'''
Sets primitive's training data
Parameters
----------
inputs: d3m dataset containing training time series
'''
#hyperparams_class = DatasetToDataFrame.DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
#ds2df_client = DatasetToDataFrame.DatasetToDataFramePrimitive(hyperparams = hyperparams_class.defaults().replace({"dataframe_resource":"learningData"}))
#metadata_inputs = ds2df_client.produce(inputs = inputs).value
#formatted_inputs = ds2df_client.produce(inputs = inputs).value
# store information on target, index variable
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/TrueTarget')
if not len(targets):
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/TrueTarget')
if not len(targets):
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/SuggestedTarget')
target_names = [list(inputs)[t] for t in targets]
index = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/PrimaryKey')
series = inputs[target_names] != ''
self.clustering = 0
if not series.any().any():
self.clustering = 1
# load and reshape training data
n_ts = len(inputs.d3mIndex.unique())
if n_ts == inputs.shape[0]:
self._kmeans = sk_kmeans(n_clusters = self.hyperparams['nclusters'], n_init = self.hyperparams['n_init'], random_state=self.random_seed)
self._X_train_all_data = inputs.drop(columns = list(inputs)[index[0]])
self._X_train = self._X_train_all_data.drop(columns = target_names).values
else:
self._kmeans = KMeans(self.hyperparams['nclusters'], self.hyperparams['algorithm'])
ts_sz = int(inputs.shape[0] / n_ts)
self._X_train = np.array(inputs.value).reshape(n_ts, ts_sz, 1)
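# A note on the two branches above (derived from the code, not the original docs): when each
# d3mIndex appears exactly once, every row is treated as one series of tabular features and
# scikit-learn KMeans is fit on that feature matrix; otherwise the long-format 'value' column
# is reshaped to (n_series, series_length, 1) for the tslearn-backed Sloth KMeans.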
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[container.pandas.DataFrame]:
"""
Parameters
----------
inputs : D3M dataframe with associated metadata.
Returns
-------
Outputs
For unsupervised problems: The output is a dataframe containing a single column where each entry is the associated series' cluster number.
For semi-supervised problems: The output is the input df containing an additional feature - cluster_label
"""
#hyperparams_class = DatasetToDataFrame.DatasetToDataFramePrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
#ds2df_client = DatasetToDataFrame.DatasetToDataFramePrimitive(hyperparams = hyperparams_class.defaults().replace({"dataframe_resource":"learningData"}))
#metadata_inputs = ds2df_client.produce(inputs = inputs).value
#formatted_inputs = ds2df_client.produce(inputs = inputs).value
# store information on target, index variable
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/TrueTarget')
if not len(targets):
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/TrueTarget')
if not len(targets):
targets = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/SuggestedTarget')
target_names = [list(inputs)[t] for t in targets]
index = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/PrimaryKey')
index_names = [list(inputs)[i] for i in index]
# load and reshape training data
n_ts = len(inputs.d3mIndex.unique())
if n_ts == inputs.shape[0]:
X_test = inputs.drop(columns = list(inputs)[index[0]])
X_test = X_test.drop(columns = target_names).values
else:
ts_sz = int(inputs.shape[0] / n_ts)
X_test = np.array(inputs.value).reshape(n_ts, ts_sz, 1)
# special semi-supervised case - during training, only produce rows with labels
if self.clustering:
sloth_df = d3m_DataFrame(pandas.DataFrame(self._kmeans.predict(X_test), columns = [target_names[0]]))
sloth_df =
|
pandas.concat([inputs.d3mIndex, sloth_df], axis=1)
|
pandas.concat
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# calculate step coordinates
def calculate_steps(tableaux, xs=['x_1', 'x_2']):
steps = len(tableaux)
coords = np.zeros([steps, 2])
values = np.zeros(steps)
for step in range(steps):
tableau = tableaux[step]
# the objective value sits in the last row of the 'value' column
values[step] = tableau['value'].iloc[-1]
x_1_val = tableau.loc[tableau['basic_variable'] == xs[0], 'value']
if len(x_1_val) > 0: coords[step, 0] = x_1_val.iloc[0]
x_2_val = tableau.loc[tableau['basic_variable'] == xs[1], 'value']
if len(x_2_val) > 0: coords[step, 1] = x_2_val.iloc[0]
return coords, values
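# Assumptions behind calculate_steps (inferred from make_tableau below, not stated in the
# original): each tableau is a DataFrame with a 'basic_variable' column naming the basic
# variable of each constraint row, a 'value' column with its current value, and a final 'z'
# row whose 'value' entry is the objective value at that simplex step.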
# plotting functions
def plot_it(x_1_bounds, x_2_bounds, objective, res=50, title='Graph', xlabel=r'x_1', ylabel=r'x_2', legend_loc=4, constraints=None, constraint_labels=None, auc=True):
fig = plt.figure()
axes = fig.add_subplot(111)
# plot axes
# axis_color = '#B3B3B3'
# axis_width = 5
# axes.axhline(0, color=axis_color, linewidth=axis_width)
# axes.axvline(0, color=axis_color, linewidth=axis_width)
# plot objective
obj_x = np.linspace(x_1_bounds[0], x_1_bounds[1], res)
obj_y = np.linspace(x_2_bounds[0], x_2_bounds[1], res)
obj_f = np.empty([obj_x.size, obj_y.size])
for i, obj_x_i in enumerate(obj_x):
obj_f[:,i] = objective[0] * obj_x_i + objective[1] * obj_y
axes.contourf(obj_x, obj_y, obj_f, res, cmap='Oranges', alpha=0.7)
# plot constraints
const_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
n_constraints = constraints.shape[0]
constraint_width = 1.5
if constraint_labels is None:
constraint_labels = np.empty(n_constraints, dtype=object)
for i in range(n_constraints):
constraint_labels[i] = 'Constraint ' + str(i+1)
def plot_constraint(i):
# find x intercept
x_int = constraints[i,2]/constraints[i,0]
if x_int > 0:
xs = np.linspace(x_1_bounds[0], min(x_1_bounds[1], x_int), res)
else:
xs = np.linspace(x_1_bounds[0], x_1_bounds[1], res)
ys = (constraints[i,2] - constraints[i,0] * xs) / constraints[i,1]
axes.plot(xs, ys, label=constraint_labels[i], linewidth=constraint_width, color=const_colors[i])
# fill under constraints
if auc:
axes.fill_between(xs, ys, color=const_colors[i], alpha=0.5)
for i in range(n_constraints):
plot_constraint(i)
# label graph
axes.set_title(title)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.legend(loc=legend_loc)
return axes
def make_tableau(constraints, objective, variables):
rows = np.concatenate((constraints, objective), axis=0)
column_names = variables + ['value', 'basic_variable']
row_names = np.empty(rows.shape[0], dtype=object)
for i in range(rows.shape[0] - 1):
row_names[i] = 'c_' + str(i+1)
row_names[-1] = 'z'
tableau =
|
pd.DataFrame(rows, columns=column_names, index=row_names)
|
pandas.DataFrame
|
import operator
from operator import methodcaller
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
from ... import connect, execute
pytestmark = pytest.mark.pandas
def test_table_column(t, df):
expr = t.plain_int64
result = expr.execute()
expected = df.plain_int64
tm.assert_series_equal(result, expected)
def test_literal(client):
assert client.execute(ibis.literal(1)) == 1
def test_read_with_undiscoverable_type(client):
with pytest.raises(TypeError):
client.table('df')
def test_selection(t, df):
expr = t[
((t.plain_strings == 'a') | (t.plain_int64 == 3))
& (t.dup_strings == 'd')
]
result = expr.execute()
expected = df[
((df.plain_strings == 'a') | (df.plain_int64 == 3))
& (df.dup_strings == 'd')
].reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
def test_mutate(t, df):
expr = t.mutate(x=t.plain_int64 + 1, y=t.plain_int64 * 2)
result = expr.execute()
expected = df.assign(x=df.plain_int64 + 1, y=df.plain_int64 * 2)
tm.assert_frame_equal(result[expected.columns], expected)
def test_project_scope_does_not_override(t, df):
col = t.plain_int64
expr = t[
[
col.name('new_col'),
col.sum()
.over(ibis.window(group_by='dup_strings'))
.name('grouped'),
]
]
result = expr.execute()
expected = pd.concat(
[
df[['plain_int64', 'dup_strings']].rename(
columns={'plain_int64': 'new_col'}
),
df.groupby('dup_strings')
.plain_int64.transform('sum')
.reset_index(drop=True)
.rename('grouped'),
],
axis=1,
)[['new_col', 'grouped']]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'where',
[
lambda t: None,
lambda t: t.dup_strings == 'd',
lambda t: (t.dup_strings == 'd') | (t.plain_int64 < 100),
],
)
@pytest.mark.parametrize(
('ibis_func', 'pandas_func'),
[
(methodcaller('abs'), np.abs),
(methodcaller('ceil'), np.ceil),
(methodcaller('exp'), np.exp),
(methodcaller('floor'), np.floor),
(methodcaller('ln'), np.log),
(methodcaller('log10'), np.log10),
(methodcaller('log', 2), lambda x: np.log(x) / np.log(2)),
(methodcaller('log2'), np.log2),
(methodcaller('round', 0), lambda x: x.round(0).astype('int64')),
(methodcaller('round', -2), methodcaller('round', -2)),
(methodcaller('round', 2), methodcaller('round', 2)),
(methodcaller('round'), lambda x: x.round().astype('int64')),
(methodcaller('sign'), np.sign),
(methodcaller('sqrt'), np.sqrt),
],
)
def test_aggregation_group_by(t, df, where, ibis_func, pandas_func):
ibis_where = where(t)
expr = t.group_by(t.dup_strings).aggregate(
avg_plain_int64=t.plain_int64.mean(where=ibis_where),
sum_plain_float64=t.plain_float64.sum(where=ibis_where),
mean_float64_positive=ibis_func(t.float64_positive).mean(
where=ibis_where
),
neg_mean_int64_with_zeros=(-t.int64_with_zeros).mean(where=ibis_where),
nunique_dup_ints=t.dup_ints.nunique(),
)
result = expr.execute()
pandas_where = where(df)
mask = slice(None) if pandas_where is None else pandas_where
expected = (
df.groupby('dup_strings')
.agg(
{
'plain_int64': lambda x, mask=mask: x[mask].mean(),
'plain_float64': lambda x, mask=mask: x[mask].sum(),
'dup_ints': 'nunique',
'float64_positive': (
lambda x, mask=mask, func=pandas_func: func(x[mask]).mean()
),
'int64_with_zeros': lambda x, mask=mask: (-x[mask]).mean(),
}
)
.reset_index()
.rename(
columns={
'plain_int64': 'avg_plain_int64',
'plain_float64': 'sum_plain_float64',
'dup_ints': 'nunique_dup_ints',
'float64_positive': 'mean_float64_positive',
'int64_with_zeros': 'neg_mean_int64_with_zeros',
}
)
)
# TODO(phillipc): Why does pandas not return floating point values here?
expected['avg_plain_int64'] = expected.avg_plain_int64.astype('float64')
result['avg_plain_int64'] = result.avg_plain_int64.astype('float64')
expected[
'neg_mean_int64_with_zeros'
] = expected.neg_mean_int64_with_zeros.astype('float64')
result[
'neg_mean_int64_with_zeros'
] = result.neg_mean_int64_with_zeros.astype('float64')
expected['mean_float64_positive'] = expected.mean_float64_positive.astype(
'float64'
)
result['mean_float64_positive'] = result.mean_float64_positive.astype(
'float64'
)
lhs = result[expected.columns]
rhs = expected
tm.assert_frame_equal(lhs, rhs)
def test_aggregation_without_group_by(t, df):
expr = t.aggregate(
avg_plain_int64=t.plain_int64.mean(),
sum_plain_float64=t.plain_float64.sum(),
)
result = expr.execute()[['avg_plain_int64', 'sum_plain_float64']]
new_names = {
'plain_float64': 'sum_plain_float64',
'plain_int64': 'avg_plain_int64',
}
expected = (
pd.Series(
[df['plain_int64'].mean(), df['plain_float64'].sum()],
index=['plain_int64', 'plain_float64'],
)
.to_frame()
.T.rename(columns=new_names)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_with_having(t, df):
expr = (
t.group_by(t.dup_strings)
.having(t.plain_float64.sum() == 5)
.aggregate(avg_a=t.plain_int64.mean(), sum_c=t.plain_float64.sum())
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg({'plain_int64': 'mean', 'plain_float64': 'sum'})
.reset_index()
.rename(columns={'plain_int64': 'avg_a', 'plain_float64': 'sum_c'})
)
expected = expected.loc[expected.sum_c == 5, ['avg_a', 'sum_c']]
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_rename_key(t, df):
expr = t.groupby(t.dup_strings.name('foo')).aggregate(
dup_string_count=t.dup_strings.count()
)
assert 'foo' in expr.schema()
result = expr.execute()
assert 'foo' in result.columns
expected = (
df.groupby('dup_strings')
.dup_strings.count()
.rename('dup_string_count')
.reset_index()
.rename(columns={'dup_strings': 'foo'})
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('reduction', ['mean', 'sum', 'count', 'std', 'var'])
@pytest.mark.parametrize(
'where',
[
lambda t: (t.plain_strings == 'a') | (t.plain_strings == 'c'),
lambda t: (t.dup_strings == 'd')
& ((t.plain_int64 == 1) | (t.plain_int64 == 3)),
lambda t: None,
],
)
def test_reduction(t, df, reduction, where):
func = getattr(t.plain_int64, reduction)
mask = where(t)
expr = func(where=mask)
result = expr.execute()
df_mask = where(df)
expected_func = getattr(
df.loc[df_mask if df_mask is not None else slice(None), 'plain_int64'],
reduction,
)
expected = expected_func()
assert result == expected
@pytest.mark.parametrize(
'reduction',
[
lambda x: x.any(),
lambda x: x.all(),
lambda x: ~(x.any()),
lambda x: ~(x.all()),
],
)
def test_boolean_aggregation(t, df, reduction):
expr = reduction(t.plain_int64 == 1)
result = expr.execute()
expected = reduction(df.plain_int64 == 1)
assert result == expected
@pytest.mark.parametrize('column', ['float64_with_zeros', 'int64_with_zeros'])
def test_null_if_zero(t, df, column):
expr = t[column].nullifzero()
result = expr.execute()
expected = df[column].replace(0, np.nan)
|
tm.assert_series_equal(result, expected)
|
pandas.testing.assert_series_equal
|
import pandas as pd
import numpy as np
input_file = '/Users/andrewemmett/Projects/adventofcode/2021/data/day3aocd.data'
test_input = '/Users/andrewemmett/Projects/adventofcode/2021/data/day3_test.data'
my_input = '/Users/andrewemmett/Projects/adventofcode/2021/data/day3_my_test.data'
def binary_convert(binary):
gamma = 0
epsilon = 0
for i, x in enumerate(binary[::-1]):
# decode binary gamma
gamma += int(x) * 2 ** i
# handle inverse for epsilon
if int(x) == 1:
y = 0
else:
y = 1
epsilon += int(y) * 2 ** i
return gamma * epsilon
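# Reasoning note (not in the original source): epsilon is accumulated as the bitwise
# complement of gamma, so binary_convert(gamma_bits) returns gamma * epsilon in one pass,
# i.e. the "power consumption" of Advent of Code 2021 day 3. For example,
# binary_convert("10110") == 22 * 9 == 198.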
def single_binary_convert(binary):
int_num = 0
for i, x in enumerate(binary[::-1]):
int_num += int(x) * 2 ** i
return int_num
def get_gamma(input_file):
'''returns the gamma rate bits, i.e. the most common bit in each column (ties resolve to 1)'''
with open(input_file, 'r') as file:
data = [list(line.strip()) for line in file.readlines()]
array = np.array(data).astype(int)
return np.ceil(np.median(array, axis=0))
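# Why the median trick works (a reasoning note, not from the original): each column contains
# only 0s and 1s, so the most common bit equals the column median rounded up; np.ceil of the
# median therefore yields the gamma bits directly, with an exact tie (median 0.5) resolving
# to 1.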
def get_oxygen_rating(input_file):
with open(input_file, 'r') as file:
data = [list(line.strip()) for line in file.readlines()]
bin_array = np.array(data).astype(int)
for col in range(bin_array.shape[1]):
key = np.ceil(np.median(bin_array, axis=0)).astype(int)
for row in range(bin_array.shape[0] -1, -1, -1):
if bin_array[row,col] != key[col]:
bin_array = np.delete(bin_array, [row], axis=0)
if bin_array.shape[0] == 1:
return bin_array[0]
return bin_array[0]
def get_co2_rating(input_file):
with open(input_file, 'r') as file:
data = [list(line.strip()) for line in file.readlines()]
bin_array = np.array(data).astype(int)
for col in range(bin_array.shape[1]):
key = np.ceil(np.median(bin_array, axis=0)).astype(int)
for row in range(bin_array.shape[0] -1, -1, -1):
if bin_array[row,col] == key[col]:
bin_array = np.delete(bin_array, [row], axis=0)
if bin_array.shape[0] == 1:
return bin_array[0]
return bin_array[0]
print(single_binary_convert(get_oxygen_rating(test_input)) * single_binary_convert(get_co2_rating(test_input)))
def get_gamma_pandas(input_file):
with open(input_file, 'r') as file:
data = [list(line.strip()) for line in file.readlines()]
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# !/usr/bin/env python
# **************************************************************************************
# **************************************************************************************
# bidsmri2nidm.py
# License: GPL
# **************************************************************************************
# **************************************************************************************
# Date: 10-2-17 Coded by: <NAME> (<EMAIL>)
# Filename: bidsmri2nidm.py
#
# Program description: This program will convert a BIDS MRI dataset to a NIDM-Experiment
# RDF document. It will parse phenotype information and simply store variables/values
# and link to the associated json data dictionary file.
#
# **************************************************************************************
# Development environment: Python - PyCharm IDE
#
# **************************************************************************************
# System requirements: Python 3.X
# Libraries: pybids, numpy, matplotlib, pandas, scipy, math, dateutil, datetime,argparse,
# os,sys,getopt,csv
# **************************************************************************************
# Programmer comments:
#
#
# **************************************************************************************
# **************************************************************************************
import sys, getopt, os
import bids
from nidm.experiment import Project,Session,MRAcquisition,AcquisitionObject,DemographicsObject, AssessmentAcquisition, \
AssessmentObject,MRObject,Acquisition
from nidm.core import BIDS_Constants,Constants
from prov.model import PROV_LABEL,PROV_TYPE, ProvInfluence
from nidm.experiment.Utils import map_variables_to_terms, add_attributes_with_cde, addGitAnnexSources
from pandas import DataFrame
from prov.model import QualifiedName,Namespace
from os.path import isfile,join
from argparse import RawTextHelpFormatter
import json
import logging
import csv
import glob
from argparse import ArgumentParser
# Python program to find SHA256 hash string of a file
import hashlib
from io import StringIO
from rdflib import Graph, RDF, Literal,URIRef
def getRelPathToBIDS(filepath, bids_root):
"""
This function returns a relative file link that is relative to the BIDS root directory.
:param filepath: absolute path + file
:param bids_root: absolute path to BIDS directory
:return: relative path to file, relative to BIDS root
"""
path,file = os.path.split(filepath)
relpath = path.replace(bids_root,"")
return(os.path.join(relpath,file))
def getsha512(filename):
"""
This function computes the SHA512 sum of a file
:param filename: path+filename of file to compute SHA512 sum for
:return: hexadecimal sha512 sum of file.
"""
sha512_hash = hashlib.sha512()
with open(filename,"rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096),b""):
sha512_hash.update(byte_block)
return sha512_hash.hexdigest()
def main(argv):
parser = ArgumentParser(description=
"""This program will represent a BIDS MRI dataset as a NIDM RDF document and provide user with opportunity to annotate
the dataset (i.e. create sidecar files) and associate selected variables with broader concepts to make datasets more
FAIR. \n\n
Note, you must obtain an API key to Interlex by signing up for an account at scicrunch.org then going to My Account
and API Keys. Then set the environment variable INTERLEX_API_KEY with your key. """ ,formatter_class=RawTextHelpFormatter)
parser.add_argument('-d', dest='directory', required=True, help="Full path to BIDS dataset directory")
parser.add_argument('-jsonld', '--jsonld', action='store_true', help='If flag set, output is json-ld not TURTLE')
#parser.add_argument('-png', '--png', action='store_true', help='If flag set, tool will output PNG file of NIDM graph')
parser.add_argument('-bidsignore', '--bidsignore', action='store_true', default = False, help='If flag set, tool will add NIDM-related files to .bidsignore file')
parser.add_argument('-no_concepts', '--no_concepts', action='store_true', default = False, help='If flag set, tool will not do concept mapping')
# adding argument group for var->term mappings
mapvars_group = parser.add_argument_group('map variables to terms arguments')
mapvars_group.add_argument('-json_map', '--json_map', dest='json_map',required=False,default=False,help="Optional full path to user-supplied JSON file containing variable-term mappings.")
#parser.add_argument('-nidm', dest='nidm_file', required=False, help="Optional full path of NIDM file to add BIDS data to. ")
parser.add_argument('-log','--log', dest='logfile',required=False, default=None, help="Full path to directory to save log file. Log file name is bidsmri2nidm_[basename(args.directory)].log")
parser.add_argument('-o', dest='outputfile', required=False, default="nidm.ttl", help="Outputs turtle file called nidm.ttl in the BIDS directory by default, or whatever path/filename is set here")
args = parser.parse_args()
directory = args.directory
if args.logfile is not None:
logging.basicConfig(filename=join(args.logfile,'bidsmri2nidm_' + args.outputfile.split('/')[-2] + '.log'), level=logging.DEBUG)
# add some logging info
logging.info("bidsmri2nidm %s" %args)
# if args.owl is None:
# args.owl = 'nidm'
# importlib.reload(sys)
# sys.setdefaultencoding('utf8')
project, cde = bidsmri2project(directory,args)
# convert to rdflib Graph and add CDEs
rdf_graph = Graph()
rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')
rdf_graph = rdf_graph + cde
logging.info("Writing NIDM file....")
# logging.info(project.serializeTurtle())
logging.info("Serializing NIDM graph and creating graph visualization..")
# serialize graph
# if args.outputfile was defined by user then use it else use default which is args.directory/nidm.ttl
if args.outputfile == "nidm.ttl":
# if we're choosing json-ld, make sure file extension is .json
# if args.jsonld:
# outputfile=os.path.join(directory,os.path.splitext(args.outputfile)[0]+".json")
# if flag set to add to .bidsignore then add
# if (args.bidsignore):
# addbidsignore(directory,os.path.splitext(args.outputfile)[0]+".json")
outputfile=os.path.join(directory,args.outputfile)
if (args.bidsignore):
addbidsignore(directory,args.outputfile)
rdf_graph.serialize(destination=outputfile,format='turtle')
# else:
# outputfile=os.path.join(directory,args.outputfile)
# if (args.bidsignore):
# addbidsignore(directory,args.outputfile)
else:
# if we're choosing json-ld, make sure file extension is .json
# if args.jsonld:
# outputfile = os.path.splitext(args.outputfile)[0]+".json"
# if (args.bidsignore):
# addbidsignore(directory,os.path.splitext(args.outputfile)[0]+".json")
# else:
# outputfile = args.outputfile
# if (args.bidsignore):
# addbidsignore(directory,args.outputfile)
outputfile=args.outputfile
if (args.bidsignore):
addbidsignore(directory,args.outputfile)
rdf_graph.serialize(destination=outputfile,format='turtle')
# serialize NIDM file
# with open(outputfile,'w') as f:
# if args.jsonld:
# f.write(project.serializeJSONLD())
# else:
# f.write(project.serializeTurtle())
# save a DOT graph as PNG
# if (args.png):
# project.save_DotGraph(str(outputfile + ".png"), format="png")
# # if flag set to add to .bidsignore then add
# if (args.bidsignore):
# addbidsignore(directory,os.path.basename(str(outputfile + ".png")))
def addbidsignore(directory,filename_to_add):
logging.info("Adding file %s to %s/.bidsignore..." %(filename_to_add,directory))
# adds filename_to_add to .bidsignore file in directory
if not isfile(os.path.join(directory,".bidsignore")):
with open(os.path.join(directory,".bidsignore"),"w") as text_file:
text_file.write("%s\n" %filename_to_add)
else:
if filename_to_add not in open(os.path.join(directory,".bidsignore")).read():
with open(os.path.join(directory,".bidsignore"),"a") as text_file:
text_file.write("%s\n" %filename_to_add)
def addimagingsessions(bids_layout,subject_id,session,participant, directory,img_session=None):
'''
This function adds imaging acquisitions to the NIDM file and deals with BIDS structures potentially having
separate ses-* directories or not
:param bids_layout:
:param subject_id:
:param session:
:param participant:
:param directory:
:param img_session:
:return:
'''
for file_tpl in bids_layout.get(subject=subject_id, session=img_session, extension=['.nii', '.nii.gz']):
# create an acquisition activity
acq=MRAcquisition(session)
# check whether participant (i.e. agent) for this subject already exists (i.e. if participants.tsv file exists) else create one
if (not subject_id in participant) and (not subject_id.lstrip("0") in participant):
participant[subject_id] = {}
participant[subject_id]['person'] = acq.add_person(attributes=({Constants.NIDM_SUBJECTID:subject_id}))
acq.add_qualified_association(person=participant[subject_id]['person'],role=Constants.NIDM_PARTICIPANT)
# added to account for errors in BIDS datasets where participants.tsv may have no leading 0's but
# subject directories do. Since bidsmri2nidm starts with the participants.tsv file those are the IDs unless
# there's a subject directory and no entry in participants.tsv...
elif subject_id.lstrip("0") in participant:
# then link acquisition to the agent with participant ID without leading 00's
acq.add_qualified_association(person=participant[subject_id.lstrip("0")]['person'],role=Constants.NIDM_PARTICIPANT)
else:
# add qualified association with person
acq.add_qualified_association(person=participant[subject_id]['person'],role=Constants.NIDM_PARTICIPANT)
if file_tpl.entities['datatype']=='anat':
# do something with anatomicals
acq_obj = MRObject(acq)
# add image contrast type
if file_tpl.entities['suffix'] in BIDS_Constants.scans:
acq_obj.add_attributes({Constants.NIDM_IMAGE_CONTRAST_TYPE:BIDS_Constants.scans[file_tpl.entities['suffix']]})
else:
logging.info("WARNING: No matching image contrast type found in BIDS_Constants.py for %s" % file_tpl.entities['suffix'])
# add image usage type
if file_tpl.entities['datatype'] in BIDS_Constants.scans:
acq_obj.add_attributes({Constants.NIDM_IMAGE_USAGE_TYPE:BIDS_Constants.scans[file_tpl.entities['datatype']]})
else:
logging.info("WARNING: No matching image usage type found in BIDS_Constants.py for %s" % file_tpl.entities['datatype'])
# add file link
# make relative link to
acq_obj.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(join(file_tpl.dirname,file_tpl.filename), directory)})
# add git-annex info if exists
num_sources = addGitAnnexSources(obj=acq_obj,filepath=join(file_tpl.dirname,file_tpl.filename),bids_root=directory)
# if there aren't any git annex sources then just store the local directory information
if num_sources == 0:
# WIP: add absolute location of BIDS directory on disk for later finding of files
acq_obj.add_attributes({Constants.PROV['Location']:"file:/" + join(file_tpl.dirname,file_tpl.filename)})
# add sha512 sum
if isfile(join(directory,file_tpl.dirname,file_tpl.filename)):
acq_obj.add_attributes({Constants.CRYPTO_SHA512:getsha512(join(directory,file_tpl.dirname,file_tpl.filename))})
else:
logging.info("WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..." %join(directory,file_tpl.dirname,file_tpl.filename))
# get associated JSON file if exists
# There is a T1w.json sidecar file with this information
json_data = (bids_layout.get(suffix=file_tpl.entities['suffix'],subject=subject_id))[0].metadata
if len(json_data.info)>0:
for key in json_data.info.keys():
if key in BIDS_Constants.json_keys:
if type(json_data.info[key]) is list:
acq_obj.add_attributes({BIDS_Constants.json_keys[key.replace(" ", "_")]:''.join(str(e) for e in json_data.info[key])})
else:
acq_obj.add_attributes({BIDS_Constants.json_keys[key.replace(" ", "_")]:json_data.info[key]})
# Parse T1w.json file in BIDS directory to add the attributes contained inside
if (os.path.isdir(os.path.join(directory))):
try:
with open(os.path.join(directory,'T1w.json')) as data_file:
dataset = json.load(data_file)
except OSError:
logging.warning("Cannot find T1w.json file...looking for session-specific one")
try:
if img_session is not None:
with open(os.path.join(directory,'ses-' + img_session + '_T1w.json')) as data_file:
dataset = json.load(data_file)
else:
dataset={}
except OSError:
logging.warning("Cannot find session-specific T1w.json file which is required in the BIDS spec..continuing anyway")
dataset={}
else:
logging.critical("Error: BIDS directory %s does not exist!" %os.path.join(directory))
exit(-1)
# add various attributes if they exist in BIDS dataset
for key in dataset:
# if key from T1w.json file is mapped to term in BIDS_Constants.py then add to NIDM object
if key in BIDS_Constants.json_keys:
if type(dataset[key]) is list:
acq_obj.add_attributes({BIDS_Constants.json_keys[key]:"".join(dataset[key])})
else:
acq_obj.add_attributes({BIDS_Constants.json_keys[key]:dataset[key]})
elif file_tpl.entities['datatype'] == 'func':
# do something with functionals
acq_obj = MRObject(acq)
# add image contrast type
if file_tpl.entities['suffix'] in BIDS_Constants.scans:
acq_obj.add_attributes({Constants.NIDM_IMAGE_CONTRAST_TYPE:BIDS_Constants.scans[file_tpl.entities['suffix']]})
else:
logging.info("WARNING: No matching image contrast type found in BIDS_Constants.py for %s" % file_tpl.entities['suffix'])
# add image usage type
if file_tpl.entities['datatype'] in BIDS_Constants.scans:
acq_obj.add_attributes({Constants.NIDM_IMAGE_USAGE_TYPE:BIDS_Constants.scans[file_tpl.entities['datatype']]})
else:
logging.info("WARNING: No matching image usage type found in BIDS_Constants.py for %s" % file_tpl.entities['datatype'])
# make relative link to
acq_obj.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(join(file_tpl.dirname,file_tpl.filename), directory)})
# add git-annex/datalad info if exists
num_sources=addGitAnnexSources(obj=acq_obj,filepath=join(file_tpl.dirname,file_tpl.filename),bids_root=directory)
# if there aren't any git annex sources then just store the local directory information
if num_sources == 0:
# WIP: add absolute location of BIDS directory on disk for later finding of files
acq_obj.add_attributes({Constants.PROV['Location']:"file:/" + join(file_tpl.dirname,file_tpl.filename)})
# add sha512 sum
if isfile(join(directory,file_tpl.dirname,file_tpl.filename)):
acq_obj.add_attributes({Constants.CRYPTO_SHA512:getsha512(join(directory,file_tpl.dirname,file_tpl.filename))})
else:
logging.info("WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..." %join(directory,file_tpl.dirname,file_tpl.filename))
if 'run' in file_tpl.entities:
acq_obj.add_attributes({BIDS_Constants.json_keys["run"]:file_tpl.entities['run']})
# get associated JSON file if exists
json_data = (bids_layout.get(suffix=file_tpl.entities['suffix'],subject=subject_id))[0].metadata
if len(json_data.info)>0:
for key in json_data.info.keys():
if key in BIDS_Constants.json_keys:
if type(json_data.info[key]) is list:
acq_obj.add_attributes({BIDS_Constants.json_keys[key.replace(" ", "_")]:''.join(str(e) for e in json_data.info[key])})
else:
acq_obj.add_attributes({BIDS_Constants.json_keys[key.replace(" ", "_")]:json_data.info[key]})
# get associated events TSV file
if 'run' in file_tpl.entities:
events_file = bids_layout.get(subject=subject_id, extension=['.tsv'],modality=file_tpl.entities['datatype'],task=file_tpl.entities['task'],run=file_tpl.entities['run'])
else:
events_file = bids_layout.get(subject=subject_id, extension=['.tsv'],modality=file_tpl.entities['datatype'],task=file_tpl.entities['task'])
# if there is an events file then this is task-based so create an acquisition object for the task file and link
if events_file:
#for now create acquisition object and link it to the associated scan
events_obj = AcquisitionObject(acq)
#add prov type, task name as prov:label, and link to filename of events file
events_obj.add_attributes({PROV_TYPE:Constants.NIDM_MRI_BOLD_EVENTS,BIDS_Constants.json_keys["TaskName"]: json_data["TaskName"], Constants.NIDM_FILENAME:getRelPathToBIDS(events_file[0].filename, directory)})
#link it to appropriate MR acquisition entity
events_obj.wasAttributedTo(acq_obj)
# add source links for this file
# add git-annex/datalad info if exists
num_sources=addGitAnnexSources(obj=events_obj,filepath=events_file,bids_root=directory)
# if there aren't any git annex sources then just store the local directory information
if num_sources == 0:
# WIP: add absolute location of BIDS directory on disk for later finding of files
events_obj.add_attributes({Constants.PROV['Location']:"file:/" + events_file})
#Parse task-rest_bold.json file in BIDS directory to add the attributes contained inside
if (os.path.isdir(os.path.join(directory))):
try:
with open(os.path.join(directory,'task-rest_bold.json')) as data_file:
dataset = json.load(data_file)
except OSError:
logging.warning("Cannot find task-rest_bold.json file looking for session-specific one")
try:
if img_session is not None:
with open(os.path.join(directory,'ses-' + img_session +'_task-rest_bold.json')) as data_file:
dataset = json.load(data_file)
else:
dataset={}
except OSError:
logging.warning("Cannot find session-specific task-rest_bold.json file which is required in the BIDS spec..continuing anyway")
dataset={}
else:
logging.critical("Error: BIDS directory %s does not exist!" %os.path.join(directory))
exit(-1)
#add various attributes if they exist in BIDS dataset
for key in dataset:
#if key from task-rest_bold.json file is mapped to term in BIDS_Constants.py then add to NIDM object
if key in BIDS_Constants.json_keys:
if type(dataset[key]) is list:
acq_obj.add_attributes({BIDS_Constants.json_keys[key]:",".join(map(str,dataset[key]))})
else:
acq_obj.add_attributes({BIDS_Constants.json_keys[key]:dataset[key]})
# DBK added for ASL support 3/16/21
# WIP: Waiting for pybids > 0.12.4 to support perfusion scans
elif file_tpl.entities['datatype'] == 'perf':
acq_obj = MRObject(acq)
# add image contrast type
if file_tpl.entities['suffix'] in BIDS_Constants.scans:
acq_obj.add_attributes(
{Constants.NIDM_IMAGE_CONTRAST_TYPE: BIDS_Constants.scans[file_tpl.entities['suffix']]})
else:
logging.info(
"WARNING: No matching image contrast type found in BIDS_Constants.py for %s" % file_tpl.entities[
'suffix'])
# add image usage type
if file_tpl.entities['datatype'] in BIDS_Constants.scans:
acq_obj.add_attributes({Constants.NIDM_IMAGE_USAGE_TYPE: BIDS_Constants.scans["asl"]})
else:
logging.info(
"WARNING: No matching image usage type found in BIDS_Constants.py for %s" % file_tpl.entities[
'datatype'])
# make relative link to
acq_obj.add_attributes(
{Constants.NIDM_FILENAME: getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename), directory)})
# add sha512 sum
if isfile(join(directory, file_tpl.dirname, file_tpl.filename)):
acq_obj.add_attributes(
{Constants.CRYPTO_SHA512: getsha512(join(directory, file_tpl.dirname, file_tpl.filename))})
else:
logging.info(
"WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..." % join(directory,
file_tpl.dirname,
file_tpl.filename))
# add git-annex/datalad info if exists
num_sources = addGitAnnexSources(obj=acq_obj, filepath=join(file_tpl.dirname, file_tpl.filename),
bids_root=directory)
if num_sources == 0:
acq_obj.add_attributes(
{Constants.PROV['Location']: "file:/" + join(file_tpl.dirname, file_tpl.filename)})
if 'run' in file_tpl.entities:
acq_obj.add_attributes({BIDS_Constants.json_keys["run"]: file_tpl.entities['run']})
# get associated JSON file if exists
json_data = (bids_layout.get(suffix=file_tpl.entities['suffix'], subject=subject_id))[0].metadata
if len(json_data.info) > 0:
for key in json_data.info.keys():
if key in BIDS_Constants.json_keys:
if type(json_data.info[key]) is list:
acq_obj.add_attributes({BIDS_Constants.json_keys[key.replace(" ", "_")]: ''.join(
str(e) for e in json_data.info[key])})
else:
acq_obj.add_attributes(
{BIDS_Constants.json_keys[key.replace(" ", "_")]: json_data.info[key]})
# check if separate M0 scan exists, if so add location and filename
# WIP, waiting for pybids > 0.12.4 to support...
# WIP support B0 maps...waiting for pybids > 0.12.4
# elif file_tpl.entities['datatype'] == 'fmap':
elif file_tpl.entities['datatype'] == 'dwi':
#do stuff with with dwi scans...
acq_obj = MRObject(acq)
#add image contrast type
if file_tpl.entities['suffix'] in BIDS_Constants.scans:
acq_obj.add_attributes({Constants.NIDM_IMAGE_CONTRAST_TYPE:BIDS_Constants.scans[file_tpl.entities['suffix']]})
else:
logging.info("WARNING: No matching image contrast type found in BIDS_Constants.py for %s" % file_tpl.entities['suffix'])
#add image usage type
if file_tpl.entities['datatype'] in BIDS_Constants.scans:
acq_obj.add_attributes({Constants.NIDM_IMAGE_USAGE_TYPE:BIDS_Constants.scans["dti"]})
else:
logging.info("WARNING: No matching image usage type found in BIDS_Constants.py for %s" % file_tpl.entities['datatype'])
#make relative link to
acq_obj.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(join(file_tpl.dirname,file_tpl.filename), directory)})
#add sha512 sum
if isfile(join(directory,file_tpl.dirname,file_tpl.filename)):
acq_obj.add_attributes({Constants.CRYPTO_SHA512:getsha512(join(directory,file_tpl.dirname,file_tpl.filename))})
else:
logging.info("WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..." %join(directory,file_tpl.dirname,file_tpl.filename))
# add git-annex/datalad info if exists
num_sources = addGitAnnexSources(obj=acq_obj,filepath=join(file_tpl.dirname,file_tpl.filename),bids_root=directory)
if num_sources == 0:
acq_obj.add_attributes({Constants.PROV['Location']: "file:/" + join(file_tpl.dirname,file_tpl.filename)})
if 'run' in file_tpl.entities:
acq_obj.add_attributes({BIDS_Constants.json_keys["run"]:file_tpl.entities['run']})
#get associated JSON file if exists
json_data = (bids_layout.get(suffix=file_tpl.entities['suffix'],subject=subject_id))[0].metadata
if len(json_data.info)>0:
for key in json_data.info.keys():
if key in BIDS_Constants.json_keys:
if type(json_data.info[key]) is list:
acq_obj.add_attributes({BIDS_Constants.json_keys[key.replace(" ", "_")]:''.join(str(e) for e in json_data.info[key])})
else:
acq_obj.add_attributes({BIDS_Constants.json_keys[key.replace(" ", "_")]:json_data.info[key]})
#for bval and bvec files, what to do with those?
# for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
acq_obj_bval = AcquisitionObject(acq)
acq_obj_bval.add_attributes({PROV_TYPE:BIDS_Constants.scans["bval"]})
# add file link to bval files
acq_obj_bval.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(join(file_tpl.dirname,bids_layout.get_bval(join(file_tpl.dirname,file_tpl.filename))),directory)})
# add git-annex/datalad info if exists
num_sources = addGitAnnexSources(obj=acq_obj_bval,filepath=join(file_tpl.dirname,bids_layout.get_bval(join(file_tpl.dirname,file_tpl.filename))),bids_root=directory)
if num_sources == 0:
# WIP: add absolute location of BIDS directory on disk for later finding of files
acq_obj_bval.add_attributes({Constants.PROV['Location']:"file:/" + join(file_tpl.dirname,bids_layout.get_bval(join(file_tpl.dirname,file_tpl.filename)))})
# add sha512 sum
if isfile(join(directory,file_tpl.dirname,file_tpl.filename)):
acq_obj_bval.add_attributes({Constants.CRYPTO_SHA512:getsha512(join(directory,file_tpl.dirname,file_tpl.filename))})
else:
logging.info("WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..." %join(directory,file_tpl.dirname,file_tpl.filename))
acq_obj_bvec = AcquisitionObject(acq)
acq_obj_bvec.add_attributes({PROV_TYPE:BIDS_Constants.scans["bvec"]})
#add file link to bvec files
acq_obj_bvec.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(join(file_tpl.dirname,bids_layout.get_bvec(join(file_tpl.dirname,file_tpl.filename))),directory)})
# add git-annex/datalad info if exists
num_sources = addGitAnnexSources(obj=acq_obj_bvec,filepath=join(file_tpl.dirname,bids_layout.get_bvec(join(file_tpl.dirname,file_tpl.filename))),bids_root=directory)
if num_sources == 0:
#WIP: add absolute location of BIDS directory on disk for later finding of files
acq_obj_bvec.add_attributes({Constants.PROV['Location']:"file:/" + join(file_tpl.dirname,bids_layout.get_bvec(join(file_tpl.dirname,file_tpl.filename)))})
if isfile(join(directory,file_tpl.dirname,file_tpl.filename)):
#add sha512 sum
acq_obj_bvec.add_attributes({Constants.CRYPTO_SHA512:getsha512(join(directory,file_tpl.dirname,file_tpl.filename))})
else:
logging.info("WARNING file %s doesn't exist! No SHA512 sum stored in NIDM files..." %join(directory,file_tpl.dirname,file_tpl.filename))
#link bval and bvec acquisition object entities together or is their association with DWI scan...
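# Hedged usage note: addimagingsessions is presumably driven from bidsmri2project below,
# once per subject (and once per ses-* session directory when one exists), roughly:
#   addimagingsessions(bids_layout, '01', session['01'], participant, directory, img_session='01')
# where the subject/session IDs shown here are placeholders.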
def bidsmri2project(directory, args):
# initialize empty cde graph...it may get replaced if we're doing variable to term mapping or not
cde=Graph()
# Parse dataset_description.json file in BIDS directory
if (os.path.isdir(os.path.join(directory))):
try:
with open(os.path.join(directory,'dataset_description.json')) as data_file:
dataset = json.load(data_file)
except OSError:
logging.critical("Cannot find dataset_description.json file which is required in the BIDS spec")
exit("-1")
else:
logging.critical("Error: BIDS directory %s does not exist!" %os.path.join(directory))
exit("-1")
# create project / nidm-exp doc
project = Project()
# if there are git annex sources then add them
num_sources=addGitAnnexSources(obj=project.get_uuid(),bids_root=directory)
# else just add the local path to the dataset
if num_sources == 0:
project.add_attributes({Constants.PROV['Location']:"file:/" + directory})
# add various attributes if they exist in BIDS dataset
for key in dataset:
# if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
if key in BIDS_Constants.dataset_description:
if type(dataset[key]) is list:
project.add_attributes({BIDS_Constants.dataset_description[key]:"".join(dataset[key])})
else:
project.add_attributes({BIDS_Constants.dataset_description[key]:dataset[key]})
# get BIDS layout
bids.config.set_option('extension_initial_dot', True)
bids_layout = bids.BIDSLayout(directory)
# create empty dictionary for sessions where key is subject id and used later to link scans to same session as demographics
session={}
participant={}
# Parse participants.tsv file in BIDS directory and create study and acquisition objects
if os.path.isfile(os.path.join(directory,'participants.tsv')):
with open(os.path.join(directory,'participants.tsv')) as csvfile:
participants_data = csv.DictReader(csvfile, delimiter='\t')
# logic to map variables to terms.
# first iterate over variables in dataframe and check which ones are already mapped as BIDS constants and which are not. For those that are not
# we want to use the variable-term mapping functions to help the user do the mapping
# iterate over columns
mapping_list=[]
column_to_terms={}
for field in participants_data.fieldnames:
# column is not in BIDS_Constants
if not (field in BIDS_Constants.participants):
# add column to list for column_to_terms mapping
mapping_list.append(field)
#if user didn't supply a json mapping file but we're doing some variable-term mapping create an empty one for column_to_terms to use
if args.json_map == False:
#defaults to participants.json because here we're mapping the participants.tsv file variables to terms
# if participants.json file doesn't exist then run without json mapping file
if not os.path.isfile(os.path.join(directory,'participants.json')):
#maps variables in CSV file to terms
temp=
|
DataFrame(columns=mapping_list)
|
pandas.DataFrame
|
# Streamlit GUI for Music Data Correlation Exploration
# TO LAUNCH: streamlit run main.py
import streamlit as st
import pandas as pd
import glob
import seaborn as sns
import os
import csv
import zipfile
# take in ZIP file for tracks.csv and unzip
if glob.glob("tracks.csv") == []:
zf = zipfile.ZipFile('tracks.csv.zip','r')
r = zf.extract("tracks.csv")
print("Extracted tracks.csv.zip into", r)
# Read in tracks.csv
df = pd.read_csv("tracks.csv", index_col=None, header=0)
# Use release_date to create Year Released column with only int(year)
df["Year Released"] =
|
pd.to_datetime(df['release_date'])
|
pandas.to_datetime
|
"""
Quantilization functions and related stuff
"""
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_categorical_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
_ensure_int64)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas._libs.lib import infer_dtype
from pandas import (to_timedelta, to_datetime,
Categorical, Timestamp, Timedelta,
Series, Interval, IntervalIndex)
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
include_lowest : bool, optional
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ...
Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ...
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]),
... 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1])
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example,
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(repr(bins)))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = _ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
if the passed data is of datetime/timedelta type,
this method converts it to integer so that cut method can
handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x).view(np.int64)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x).view(np.int64)
dtype = np.datetime64
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a dtype compatible with dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
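# Hedged example of the conversion above: with dtype=np.datetime64, bins such as
# [Timestamp('2000-01-01'), Timestamp('2000-01-02')] are viewed as int64 nanosecond
# values, so the numeric searchsorted in _bins_to_cuts can be reused unchanged.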
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta('1ns')
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta('1ns')
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex.from_intervals(
[
|
Interval(v, labels[0].right, closed='right')
|
pandas.Interval
|
import numpy as np
import pandas as pd
import sys
#from sklearn import tree
#from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import roc_auc_score
from dataset import DataSet
import xgboost as xgb
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
#
# General Overview
# Module -1: Processing output files from automate_data.py
# Module 0 : Specifying feature names and output file names
# Module 1 : Preprocessing
# Module 2 : Defining the main functions
# Module 3 : Running the main functions
#
# We assume that the user has one conservation site with labeled data (conservation site 1) concerning where poaching
# and past patrol efforts have occurred, and use this knowledge to predict where future illegal attempts will be made in
# both conservation site 1 and the unlabeled area (conservation site 2), assuming their feature spaces are the same.
# We employ dynamic negative sampling of the unknown data, oversampling of the positive examples, and xgboost.
#
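# Hedged sketch (illustration only, not the pipeline's own implementation): one round of
# dynamic negative sampling with xgboost, as described in the overview above. The names
# below are placeholders; the real training/prediction functions are defined in Module 2.
def _dns_round_sketch(X, pos_mask, n_neg, seed=0):
    # X: 2-D numpy feature array; pos_mask: boolean array marking known-positive cells
    rng = np.random.RandomState(seed)
    unknown_idx = np.where(~pos_mask)[0]
    # sample negatives from the unknown (unlabeled) cells
    neg_idx = rng.choice(unknown_idx, size=n_neg, replace=False)
    train_idx = np.concatenate([np.where(pos_mask)[0], neg_idx])
    y = np.concatenate([np.ones(pos_mask.sum()), np.zeros(n_neg)])
    clf = xgb.XGBClassifier(n_estimators=100, max_depth=4)
    clf.fit(X[train_idx], y)
    # return attack probabilities for every grid cell
    return clf.predict_proba(X)[:, 1]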
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
#
# Module -1 : Processing output files from automate_data.py
#
#
#
files = ['/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/X.csv',
         '/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/Y.csv',
         '/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/is-toy_patrol.csv',
         '/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/is-toy_poaching.csv',
         '/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/is-toy_road.csv',
         '/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/dist-toy_patrol.csv',
         '/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/dist-toy_poaching.csv',
         '/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/dist-toy_road.csv',
         '/Users/tianyug/Desktop/QuickEmployment_Toy/csv_output/toy_altitude.csv']
column_names = ['X', 'Y', 'is-toy_patrol', 'is-toy_poaching', 'is-toy_road', 'dist-toy_patrol', 'dist-toy_poaching', 'dist-toy_road', 'toy_altitude']
raw_df_list = []
# read files into dataframe
for f in files:
print (f)
raw_df_list.append(pd.read_csv(f))
# get the DN as a dataframe
DN_df = raw_df_list[0][['DN']].sort_values(by=['DN'])
DN_df.reset_index(inplace=True)
# rename columns and sort based on DN
select_df_list = []
for i in range(0,len(raw_df_list)):
# rename columns
col_names = ['DN',column_names[i]]
raw_df_list[i].columns = col_names
# sort by DN
cur_sorted_df = raw_df_list[i].sort_values(by=['DN'])
cur_sorted_df.reset_index(inplace=True)
# select relevant columns
cur_select_df = cur_sorted_df[[column_names[i]]]
# normalize the selected columns
cur_normalized_df = (cur_select_df - cur_select_df.min())/(cur_select_df.max()-cur_select_df.min())
cur_normalized_df.columns = ["normal-"+column_names[i]]
select_df_list.append(cur_select_df)
if column_names[i][0:3] != 'is-':
select_df_list.append(cur_normalized_df)
# concatenate columns
select_df_list = [DN_df] + select_df_list
comb_DN_ABC = pd.concat(select_df_list, axis=1)
comb_DN_ABC.sort_values(by=["DN"],inplace=True)
comb_DN_ABC = comb_DN_ABC.drop(['index'], axis=1)  # drop() is not in-place; keep the result
comb_DN_ABC.to_csv("final.csv")
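# Hedged note on the output: final.csv now holds DN plus each raw column and, for every
# column not prefixed with "is-", a min-max scaled copy named "normal-<column>"; those
# "normal-" names are what selected_features in Module 0 refers to for the continuous features.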
# sys.exit()
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
#
# Module 0 : Specifying paths and file names
#
# The following are placeholders/examples
#
# name of excel sheet containing all the features and labels for conservation 1 and 2
fn1 = "final.csv"
fn2 = "final.csv"
# name of text file output for probabilistic predictions of
# each grid cell in conservations 1 and 2
qgis_file_in1 = "predictions1.txt"
qgis_file_in2 = "predictions2.txt"
# raster file of probabilistic predictions
qgis_file_out1 = "predictions_heatmap1.asc"
qgis_file_out2 = "predictions_heatmap2.asc"
# specify which features to use from final.csv feature spreadsheet
selected_features = ["is-toy_road",
"normal-dist-toy_road",
"normal-toy_altitude"
]
# specify which feature symbolizes where patrolling occurs
patrol = 'is-toy_patrol'
# specify which feature symbolizes where poaching occurs
poaching = 'is-toy_poaching'
# represents the coordinates of the left bottom corner for
# conservation site 1 (longitude and latitude if working with WGS84)
xcorner1 = 127.76402335
ycorner1 = 43.5257568717
# represents the coordinates of the left bottom corner for
# conservation site 2 (longitude and latitude if working with WGS84)
xcorner2 = 127.76402335
ycorner2 = 43.5257568717
# define the grid sizes (discretization levels) for each conservation site,
# which should match the values used in the automate_data.py script
gridDim1 = 0.01
gridDim2 = 0.01
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
####################################################################################################################################################
#
# Module 1 : Preprocessing
#
df_alldata =
|
pd.read_csv(fn1)
|
pandas.read_csv
|
import requests
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# Enforce incognito mode
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--incognito")
from selenium.webdriver.common.keys import Keys
from webdriver_manager.firefox import GeckoDriverManager
import numpy as np
from numpy import array
import pandas as pd
import csv
from datetime import date, datetime
# PULLING BY SEASON
def nhl_pull(str_url):
driver.get(str_url); # get to the url
try: # Wait until the table appears - JavaScript table may appear slower than other page elements
element = WebDriverWait(driver, 50).until(
EC.presence_of_element_located((By.CLASS_NAME, "rt-table"))
)
finally:
None
time.sleep(2); #Just in case
# Pull from information
html = driver.page_source # Pull the script information
soup = BeautifulSoup(html) # Soupify
# Get table header
rtheader = soup.find_all("div", {"class": "rt-table"})
n_pagecount = int(soup.find_all("span", {"class": "-totalPages"})[0].text) - 1; # number of pages to scrape
# NOTE: page numbers are zero-indexed, so n_pagecount is the index of the last page to pull
# Inside a function - this is throwing an error
tableheader = soup.find_all("div", {"class": "tableHeaderDiv"})[0].find_all("div", {"class": "rt-header-cell"})
str_titles = ["idx_row"]#['season start', 'season end']
for temp_str in tableheader:
temp_str_extract = temp_str.get('title');
if temp_str_extract == None:
temp_str_extract
else:
str_titles.append(temp_str_extract)
n_title = len(str_titles);
# Pulling the data.
table_data = soup.find_all("div", {"class": "rt-tbody"})[0].find_all("div", {"class" : "rt-tr-group"})
ary_data = [];
for idx_count, iter_row in enumerate(table_data):
each_row = iter_row.find_all("div", {"class" : "rt-td"})
temp_vec = [];
for iter_col in each_row:
temp_vec.append(iter_col.text) # save the data in order
if idx_count == 0: #start creating the array
ary_data = np.array(temp_vec)
else: # Do vertical stack
ary_data = np.vstack((ary_data, np.array(temp_vec)))
# Convert to data frame
# Note: converting to array just in case it becomes a one row list.
df_data = pd.DataFrame(np.reshape(ary_data, (-1, len(str_titles))), columns = str_titles)
# Pull total record count
n_recordcount = int(soup.find_all("span", {"class": "-totalInfo"})[0].text.split()[0]);
return {'df': df_data, 'n_pagecount': n_pagecount, 'n_title': n_title, "n_recordcount" : n_recordcount} # Return the dataframe of data & pagecount for multiple pages to pull
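# Hedged usage note: nhl_pull() relies on a module-level selenium `driver` (presumably
# created elsewhere; only chrome_options is set up above). A typical call returns a dict:
#   res = nhl_pull(url)  # {'df': DataFrame, 'n_pagecount': int, 'n_title': int, 'n_recordcount': int}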
def strip_right(df, suffix):
    # Note: str.rstrip() strips a *set* of characters rather than a literal suffix,
    # which can over-strip column names; remove the suffix explicitly instead.
    df.columns = [col[:-len(suffix)] if col.endswith(suffix) else col for col in df.columns]
    return df
# Pull URL of the team
def url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype):
URL_team_summary = (f"http://www.nhl.com/stats/"
f"{idx_data_type}?aggregate=0&{idx_report}reportType={idx_report_type}&"
f"{idx_datetype}From={iter_date_start}&{idx_datetype}To={iter_date_end}&"
f"gameType={str_gametype}&filter=gamesPlayed,gte,1&page={i_npage}&pageSize=100")
# Note that in previous iteration idx_aggregate == 'aggregate=0&' - no need because the workflow is pulled by season.
return URL_team_summary
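# Hedged example (dates are placeholders): url_team_pull("teams", "", "season",
# "2018-10-03", "2019-04-06", 2, 0, "date") builds
# http://www.nhl.com/stats/teams?aggregate=0&reportType=season&dateFrom=2018-10-03&dateTo=2019-04-06&gameType=2&filter=gamesPlayed,gte,1&page=0&pageSize=100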
def nhl_pull_loop(str_date_start, str_date_end, str_page, idx_data_type, idx_report_type, idx_datetype):
for idx, iter_date_start in enumerate(str_date_start):
iter_date_end = str_date_end[idx];
df_fin = [];
for idx_game, iter_game in enumerate(["regular", "playoff"]):
# In-loop-specific initial settings
str_gametype = idx_game + 2; # gameType code: 2 = regular season, 3 = playoffs
i_npage = 0; # start with the first page (pages are zero-indexed)
idx_report = ''; # start with the summary page
# temporary data frame save
temp_df = [];
URL_team_summary = url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype);
temp_pulled = nhl_pull(URL_team_summary)
temp_df = temp_pulled['df']; # Initialize
npage = temp_pulled['n_pagecount'];
nrecord = temp_pulled['n_recordcount'];
if nrecord == 0:
continue # no records for this game type; skip to the next one.
else: # Continue pulling the data for having a record
# For more than one record
if npage != 0:
for i_npage in range(1, npage + 1): # Python range, need to add one.
URL_team_summary = url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype);
temp_pulled = nhl_pull(URL_team_summary)
temp_df = temp_df.append(temp_pulled['df']);
else:
None
# All summary data pulled, remove empty rows
temp_df = temp_df.loc[(temp_df.idx_row != '\xa0'),:];
# Summary stats, just to check the right count of data.
#temp_df.to_csv(f'df_{idx_data_type}_{idx_report_type}_{iter_season}_summaryOnly.csv',index = False)
# Pull other data - more specific statistics,
for temp_idx in str_page:
# Set specific parameters for different categories - pages
idx_report = "report=" + temp_idx + "&";
i_npage = 0; # reset to the first page (zero-indexed)
URL_team_summary = url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype);
# Pull date
temp_pulled = nhl_pull(URL_team_summary)
# Because these are different report categories - need to build a partial master frame
temp_df_partial = temp_pulled['df'];
# Need to join the data frame
npage = temp_pulled['n_pagecount']
if npage != 0: # Pull data from multiple pages
for i_npage in range(1, npage + 1): # Python range, need to add one.
URL_team_summary = url_team_pull(idx_data_type, idx_report, idx_report_type, iter_date_start, iter_date_end, str_gametype, i_npage, idx_datetype);
temp_pulled = nhl_pull(URL_team_summary); # Pull additional data
temp_df_partial = temp_df_partial.append(temp_pulled['df']); # stack multiple pages
else:
None
# Save the data
# First, must clean up the empty rows, just to make sure not to join empty-empty
temp_df_partial = temp_df_partial.loc[(temp_df_partial.idx_row != '\xa0'),:];
if (temp_pulled['df'].size != 0): # If the page has at least one entry
if idx_data_type == 'teams': # For merging team statistics
if idx_report_type == 'season':
temp_df = pd.merge(temp_df, temp_df_partial, how = 'left', on = "Team", suffixes=('_x', '_y'))
elif idx_report_type == 'game':
temp_df = pd.merge(temp_df, temp_df_partial, how = 'left', on = ["Team", "Game"], suffixes=('_x', '_y'))
else:
None
else: ## For skaters and goalies
if idx_report_type == 'season':
if temp_idx == 'bios':
if idx_data_type == 'skaters': # To match with unique player identity, in case there are players with same name in each period
temp_df = pd.merge(temp_df, temp_df_partial, how = 'left', on = ['Player Name', 'Player Position', 'Games Played'], suffixes=('', '_y'))
else: # For goalies
temp_df =
|
pd.merge(temp_df, temp_df_partial, how = 'left', on = ['Player Name', 'Goalie Catches'], suffixes=('', '_y'))
|
pandas.merge
|
"""Graphs differences in signal measurements of channels over time (scan instances)
"""
import json
from itertools import cycle
import pandas as pd
import plotly
import plotly.graph_objects as go
from db import load
class TrackChannels():
def __init__(self):
super().__init__()
self.default_signal_measurement = "snq"
self.signal_measurements = ["snq", "ss", "seq"]
self.colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b",
"#e377c2", "#7f7f7f", "#bcbd22", "#17becf", "goldenrod", "darkseagreen",
"palevioletred", "slateblue", "teal", "chocolate", "deepskyblue", "lightcoral",
"greenyellow", "dodgerblue", "darksalmon", "khaki", "plum", "lightgreen",
"mediumslateblue", "olive", "darkgray", "fuschia", "ivory"]
self.color_cycle = cycle(self.colors)
self.default_antenna = load("SELECT configured_antenna_instance FROM monitor")["configured_antenna_instance"].to_list()[0]
self.current_antenna = self.default_antenna
antenna_df = load("SELECT * FROM antenna")
self.antenna_map = {instance: {"name": str(instance) + "; " + "Name: " + name + ", Location: " + location + ", Direction: " + str(direction) + " degrees, Comments: " + str(comment)}
for instance, name, location, direction, comment in zip(antenna_df["antenna_instance"], antenna_df["name"], antenna_df["location"],
antenna_df["direction"], antenna_df["comment"])}
# Remove quotations
for i, description in enumerate(self.antenna_map.values()):
if "'" in self.antenna_map[i + 1]["name"]:
self.antenna_map[i + 1]["name"] = self.antenna_map[i + 1]["name"].replace("'", "")
if "\"" in self.antenna_map[i + 1]["name"]:
self.antenna_map[i + 1]["name"] = self.antenna_map[i + 1]["name"].replace("\"", "")
self.fig = None
self.real_channels = None
self.labels = None
self.mdf = None
def _build_df(self):
signals =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Animation Player sample with matplotlib GUI
You can playback data as animation.
The animation progress can be controled by slider GUI.
Playback speed can be controled by radio button.
Animation start/stop can be controled by radio button.
"""
from matplotlib.widgets import RadioButtons, Slider
import matplotlib.pyplot as plt
import matplotlib.gridspec as gds
import pandas as pd
import datetime
def update_animation(crnt_date_time, crnt_x, crnt_y, crnt_qly, total_hdop, total_sat):
global pb_spd_prm
ax_pos.set_title(crnt_date_time)
if crnt_qly == 1:
snd_aln_plot.set_data(crnt_x, crnt_y)
diff_plot.set_data([], [])
elif crnt_qly == 2:
snd_aln_plot.set_data([], [])
diff_plot.set_data(crnt_x, crnt_y)
quality_text.set_text('GPS Quality = %d' % (crnt_qly))
dt_hdop_plot.set_data(pd.to_datetime(total_hdop.index), total_hdop.values)
ax_hdop.set_title(total_hdop.values[-1])
dt_sat_plot.set_data(pd.to_datetime(total_sat.index), total_sat.values)
ax_sat.set_title(total_sat.values[-1])
plt.pause(0.05 * pb_spd_prm)
if __name__ == '__main__':
# close all figure window
plt.close('all')
# read data frame csv file
nmea_gpgga_data_frame =
|
pd.read_csv('nmea_gpgga_data_frame.csv', index_col=0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(self):
# GH 18186
data = np.sort([str(i) for i in range(524289)])
expected = DataFrame({'a': Categorical(data, ordered=True)})
actual = self.read_csv(StringIO('a\n' + '\n'.join(data)),
dtype='category')
actual["a"] = actual["a"].cat.reorder_categories(
np.sort(actual.a.cat.categories), ordered=True)
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('categories', [
['a', 'b', 'c'],
['a', 'c', 'b'],
['a', 'b', 'c', 'd'],
['c', 'b', 'a'],
])
def test_categorical_categoricaldtype(self, categories, ordered):
data = """a,b
1,a
1,b
1,b
2,c"""
expected = pd.DataFrame({
"a": [1, 1, 1, 2],
"b": Categorical(['a', 'b', 'b', 'c'],
categories=categories,
ordered=ordered)
})
dtype = {"b": CategoricalDtype(categories=categories,
ordered=ordered)}
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_unsorted(self):
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(['c', 'b', 'a'])
expected = pd.DataFrame({
'a': [1, 1, 1, 2],
'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
})
result = self.read_csv(StringIO(data), dtype={'b': dtype})
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_numeric(self):
dtype = {'b': CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_datetime(self):
dtype = {
'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
dtype = {
'b': CategoricalDtype([pd.Timestamp("2014")])
}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = pd.DataFrame({'b': Categorical([pd.Timestamp('2014')] * 2)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_timedelta(self):
dtype = {'b': CategoricalDtype(pd.to_timedelta(['1H', '2H', '3H']))}
data = "b\n1H\n2H\n3H"
expected = pd.DataFrame({'b':
|
Categorical(dtype['b'].categories)
|
pandas.Categorical
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
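# Illustrative, hedged example, not part of the upstream test module: how the helper
# above is typically driven. `demo_frame` below is a stand-in for the float_frame
# fixture, not an object defined in this file.
def _example_assert_stat_op_calc():
    demo_frame = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
    assert_stat_op_calc('sum', np.sum, demo_frame)
    assert_stat_op_calc('mean', np.mean, demo_frame)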
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skipna"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns, some entries set to NA
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
|
tm.assert_almost_equal(correls['A']['C'], expected)
|
pandas.util.testing.assert_almost_equal
|
"""Exports lexicons from line annotations and transcribed files."""
import argparse
import logging
import spacy
import re
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from pathlib import Path
def load_data(sql, server, database, user, password, port=5432):
"""Load data from database by running the provided sql query.
Parameters
----------
sql: str, required
The SQL query to execute for loading data.
server : str, required
The name or IP address of the database server.
database : str, required
The name of the database containing annotations.
user : str, required
The username which is allowed to connect to the database.
password : str, required
The password of the username.
port : str, optional
The port for connecting to the database.
Returns
-------
    data : pandas.DataFrame
A dataframe containing the data fetched from database.
"""
conn_str = 'postgresql://{user}:{password}@{server}:{port}/{database}'
conn_str = conn_str.format(user=user,
password=password,
server=server,
port=port,
database=database)
engine = create_engine(conn_str)
with engine.connect() as conn:
return pd.read_sql(sql, conn)
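# Illustrative usage sketch, not part of the original script; the connection details
# below are placeholders rather than real credentials, and the query is a trivial
# example.
def _example_load_data():
    return load_data("SELECT 1 AS answer",
                     server='localhost',
                     database='annotations',
                     user='reader',
                     password='secret')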
def load_line_annotations(server, database, user, password, port=5432):
"""Load line annotations from the database specified by parameters.
Parameters
----------
server : str, required
The name or IP address of the database server.
database : str, required
The name of the database containing annotations.
user : str, required
The username which is allowed to connect to the database.
password : str, required
The password of the username.
port : str, optional
The port for connecting to the database.
Returns
-------
    line_annotations : pandas.DataFrame
A dataframe containing line annotations with the following columns:
- publishing_year
- document
"""
logging.info("Loading annotations from database...")
sql = """
SELECT PUB.PUBLISHINGYEAR AS PUBLISHING_YEAR, LA.LINE AS DOCUMENT
FROM LINE_ANNOTATIONS LA
JOIN PAGECOLLECTIONMETADATA PCM ON LA.PAGE_COLLECTION_ID = PCM.PAGECOLLECTIONID
JOIN PUBLISHING PUB ON PCM.ROCCID = PUB.METADATAID
"""
line_annotations = load_data(sql, server, database, user, password)
num_rows, _ = line_annotations.shape
logging.info("Finished loading {} lines annotations from database.".format(
num_rows))
return line_annotations
def load_transcribed_text_files(server, database, user, password, port=5432):
"""Load transcribed text files from disk.
Parameters
----------
server : str, required
The name or IP address of the database server.
database : str, required
The name of the database containing annotations.
user : str, required
The username which is allowed to connect to the database.
password : str, required
The password of the username.
port : str, optional
The port for connecting to the database.
Returns
-------
    transcribed_texts : pandas.DataFrame
A dataframe containing transcribed texts with the following columns:
- publishing_year
- document
"""
logging.info("Loading document metadata from database...")
sql = """
SELECT PUB.PUBLISHINGYEAR AS PUBLISHING_YEAR,
PC.INTEGRALTRANSCRIBEDTEXTFILE AS DOCUMENT
FROM PAGECOLLECTIONS PC
JOIN PAGECOLLECTIONMETADATA PCM ON PC.ID = PCM.PAGECOLLECTIONID
JOIN PUBLISHING PUB ON PCM.ROCCID = PUB.METADATAID
"""
data = load_data(sql, server, database, user, password)
num_rows, _ = data.shape
logging.info(
"Finished loading {} metadata rows from database.".format(num_rows))
documents = {'publishing_year': [], 'document': []}
for row in data.itertuples():
file_path = Path(row.document)
if file_path.suffix == '.xml':
logging.info("Ignoring XML document {}.".format(row.document))
continue
if not file_path.exists():
logging.warning("File {} does not exist.".format(row.document))
continue
if not file_path.is_file():
logging.warning("Path {} does not point to a file.".format(
row.document))
continue
logging.info('Adding document {} to documents dataframe.'.format(
row.document))
documents['publishing_year'].append(row.publishing_year)
documents['document'].append(file_path.read_text(encoding='utf8'))
return pd.DataFrame(documents)
def is_valid_token(token):
"""Check if token is valid.
Parameters
----------
token: str, required
The token to process.
Returns
-------
is_valid: bool
True if token is valid; False otherwise.
"""
# Remove spaces from token
token = token.replace(' ', '')
if len(token) == 0:
return False
if re.search(r'[0-9\.,?=:/"]', token):
return False
# Exclude tokens that start or end with dash '-'
# This usually signals that a single word was split into two lines
if (token[0] == '-' or token[-1] == '-'):
return False
# Exclude single-character tokens that contain various marks
if len(token) == 1:
        return token not in ['›', '‹', '"']
return True
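# Illustrative checks, not part of the original script, showing how is_valid_token
# treats a few hand-picked inputs; the sample tokens are invented for illustration.
def _example_is_valid_token():
    assert is_valid_token('carte')        # ordinary word: kept
    assert not is_valid_token('1848')     # digits: rejected by the regex filter
    assert not is_valid_token('cu-')      # trailing dash: treated as a split word
    assert not is_valid_token('›')        # lone mark: excluded from the lexicon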
def build_vocabulary(documents):
"""Build vocabulary from provided documents.
Parameters
----------
documents: iterable of str, required
The documents from which to build vocabulary.
Returns
-------
vocabulary: set of str
The set of vocabulary terms.
"""
nlp = spacy.load('ro_core_news_lg')
tokens = [
str(token) for text in documents for token in nlp(text=text)
if is_valid_token(str(token))
]
return set([token.lower() for token in tokens])
def format_period(period):
"""Build a pretty name for a time interval.
Parameters
----------
period: pandas.Interval, required
The time interval to format.
Returns
-------
name: str
The period name in format '<period.left>-<period.right>'.
"""
left, right = int(period.left), int(period.right)
return '{left}-{right}'.format(left=left, right=right)
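# Illustrative usage, not part of the original script: formatting a pandas Interval
# like the publishing-year periods this module works with.
def _example_format_period():
    period = pd.Interval(1800.0, 1850.0)
    return format_period(period)  # -> '1800-1850'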
def build_vocabulary_file_name(period):
"""Build vocabulary file name for given period.
Parameters
----------
period: pandas.Interval, required
The time period for which to build file name.
Returns
-------
file_name: str
The file name in format '<period.left>-<period.right>.csv'.
"""
return "{name}.csv".format(name=format_period(period))
def to_csv(dataframe, file_name, write_header):
"""Save provided dataframe to specified file name in CSV format.
Parameters
----------
dataframe: pandas.DataFrame, required
The data frame to save.
file_name: str, required
The full name of the file where to save the dataframe.
write_header: bool, required
Specifies whether to write the header row or not.
"""
dataframe.to_csv(file_name, index=False, header=write_header)
def save_data_frame(df, title, output_dir, file_name, write_header):
"""Save data frame to specified path.
Parameters
----------
df: pandas.DataFrame, required
The data frame to save.
title: str, required
The title of data frame; this will be displayed in logging message.
output_dir: str, required
The parent directory of the file in which to save the data frame.
If directory does not exist, it will be created.
file_name: str, required
The name of the file in which to save the data frame.
write_header: bool, required
Specifies whether to write header row of the data frame or not.
"""
path = Path(output_dir)
path.mkdir(parents=True, exist_ok=True)
path = path / file_name
file_name = str(path)
logging.info("Saving {what} in {where}.".format(what=title,
where=file_name))
to_csv(df, file_name, write_header)
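# Illustrative call, not part of the original script; the directory and file name are
# placeholders chosen for this sketch.
def _example_save_data_frame():
    demo_df = pd.DataFrame({'Term': ['carte', 'domn']})
    save_data_frame(demo_df, 'demo lexicon', 'output/demo', 'demo.csv',
                    write_header=True)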
def save_vocabulary(vocab, directory_name, file_name, write_header=False):
"""Save provided vocabulary in the specified file and directory.
Parameters
----------
vocab: iterable of str, required
The iterable containing vocabulary terms.
directory_name: str, required
The directory where to save vocabulary.
file_name: str, required
The name of the file where to save vocabulary.
    write_header: bool, optional
Specifies whether the output file should contain a header row.
Default is False.
"""
path = Path(directory_name)
path.mkdir(parents=True, exist_ok=True)
path = path / file_name
file_path = str(path)
logging.info("Saving {count} tokens from lexicon into file {file}.".format(
file=file_path, count=len(vocab)))
df =
|
pd.DataFrame(vocab, columns=['Term'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import matplotlib
from importlib import reload
import matplotlib.pyplot as plt
import elements
elements = reload(elements)
from elements.event import Event
import os
from scipy.fft import fft, fftfreq, ifft
#%%
#meta data
meta_event =
|
pd.read_csv('data/meta_data.csv')
|
pandas.read_csv
|
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command_string="conda install "+ my_package+ " --yes"
# os.system(command_string)
# except:
# count=1
from PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,
QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
# from scipy import interp
from itertools import cycle, combinations
import random
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QSizePolicy, QFormLayout, QRadioButton, QScrollArea, QMessageBox
from PyQt5.QtGui import QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pandas as pd
import numpy as np
import pickle
from numpy.polynomial.polynomial import polyfit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.compose import make_column_transformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc, log_loss, brier_score_loss
from sklearn.calibration import calibration_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import feature_selection
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import cross_val_predict
# Libraries to display decision tree
from pydotplus import graph_from_dot_data
import collections
from sklearn.tree import export_graphviz
import webbrowser
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from Preprocessing import PreProcessing
import random
import seaborn as sns
#%%-----------------------------------------------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\graphviz-2.38\\release\\bin'
#%%-----------------------------------------------------------------------
#::--------------------------------
# Default font size for all the windows
#::--------------------------------
font_size_window = 'font-size:18px'
class DecisionTree(QMainWindow):
#::--------------------------------------------------------------------------------
    # Implementation of the Decision Tree Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(DecisionTree, self).__init__()
self.Title = "Decision Tree Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
        # Create the canvas and all the elements to create a dashboard with
        # all the necessary elements to present the results from the algorithm
        # The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Decision Tree Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
        self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
self.btnDTFigure = QPushButton("View Tree")
self.btnDTFigure.setGeometry(QRect(60, 500, 75, 23))
self.btnDTFigure.clicked.connect(self.view_tree)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnDTFigure, 21, 1, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
        Decision Tree Classifier
        We populate the dashboard using the parameters chosen by the user
        The parameters are processed to execute the scikit-learn Decision Tree algorithm
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
            if len(self.list_corr_features) == 0:
                self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
vmax_depth = float(self.txtMaxDepth.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
# -----------------------------------------------------------------------
filename = 'dt_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
X_test= X[features_list]
# predicton on test using entropy
y_pred_entropy = self.clf_entropy.predict(X_test)
        # confusion matrix for the Decision Tree model
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
        ## Graph 1 :
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
        y_pred_score = self.clf_entropy.predict_proba(X_test)
        for i in range(len(class_names)):
            for j in range(len(class_names)):
                self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
        # From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
        self.ax2.set_title('ROC Curve Decision Tree')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - 3 Feature Importances
#####################################
# get feature importances
importances = self.clf_entropy.feature_importances_
        # convert the importances into a one-dimensional Series with corresponding df column names as axis labels
f_importances = pd.Series(importances, self.list_corr_features.columns)
# sort the array in descending order of the importances, only show the first 10
f_importances.sort_values(ascending=False, inplace=True)
f_importances = f_importances[0:10]
X_Features = f_importances.index
y_Importance = list(f_importances)
self.ax3.barh(X_Features, y_Importance)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'lr_finalized_model.sav'
self.other_clf_lr = pickle.load(open(filename2, 'rb'))
y_pred_lr = self.other_clf_lr.predict(X_test)
self.accuracy_lr = accuracy_score(y_test, y_pred_lr) * 100
self.txtAccuracy_lr.setText(str(self.accuracy_lr))
filename3 = 'rf_finalized_model.sav'
self.other_clf_rf = pickle.load(open(filename3, 'rb'))
y_pred_rf = self.other_clf_rf.predict(X_test)
self.accuracy_rf = accuracy_score(y_test, y_pred_rf) * 100
self.txtAccuracy_rf.setText(str(self.accuracy_rf))
filename4 = 'gb_finalized_model.sav'
self.other_clf_gb = pickle.load(open(filename4, 'rb'))
y_pred_gb = self.other_clf_gb.predict(X_test)
self.accuracy_gb = accuracy_score(y_test, y_pred_gb) * 100
self.txtAccuracy_gb.setText(str(self.accuracy_gb))
def view_tree(self):
'''
        Executes Graphviz to create a tree view of the information
        then it presents the graphic in a PDF format using webbrowser
:return:None
'''
webbrowser.open_new(r'decision_tree_entropy.pdf')
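# Standalone, hedged sketch, not part of the GUI classes: the per-class ROC computation
# that the update() methods use, applied to a tiny invented y_true / y_score pair.
def _example_roc_by_class():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([[0.9, 0.1], [0.6, 0.4], [0.35, 0.65], [0.2, 0.8]])
    y_bin = pd.get_dummies(y_true).to_numpy()
    fpr, tpr, roc_auc = dict(), dict(), dict()
    for i in range(y_bin.shape[1]):
        fpr[i], tpr[i], _ = roc_curve(y_bin[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    return roc_auc  # {0: 1.0, 1: 1.0} for this toy input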
class RandomForest(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of Random Forest Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(RandomForest, self).__init__()
self.Title = "Random Forest Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
        # Create the canvas and all the elements to create a dashboard with
        # all the necessary elements to present the results from the algorithm
        # The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Random Forest Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
        self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_dt = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.other_models.layout.addRow('Decision tree:', self.txtAccuracy_dt)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
Random Forest Classifier
        We populate the dashboard using the parameters chosen by the user
        The parameters are processed to execute the scikit-learn Random Forest algorithm
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
            if len(self.list_corr_features) == 0:
                self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
vmax_depth = float(self.txtMaxDepth.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
filename = 'rf_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
X_test = X[features_list]
# -----------------------------------------------------------------------
# predicton on test using entropy
y_pred_entropy = self.clf_entropy.predict(X_test)
# confusion matrix for RandomForest
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
        ## Graph 1 :
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
        y_pred_score = self.clf_entropy.predict_proba(X_test)
        for i in range(len(class_names)):
            for j in range(len(class_names)):
                self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
        # From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
self.ax2.set_title('ROC Curve Random Forest')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - 3 Feature Importances
#####################################
# get feature importances
importances = self.clf_entropy.feature_importances_
        # convert the importances into a one-dimensional Series with corresponding df column names as axis labels
f_importances = pd.Series(importances, self.list_corr_features.columns)
# sort the array in descending order of the importances, only show the first 10
f_importances.sort_values(ascending=False, inplace=True)
f_importances = f_importances[0:10]
X_Features = f_importances.index
y_Importance = list(f_importances)
self.ax3.barh(X_Features, y_Importance)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
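        # plot one ROC curve per class, reusing the per-class fpr/tpr/auc computed for Graph 2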
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'lr_finalized_model.sav'
self.other_clf_lr = pickle.load(open(filename2, 'rb'))
y_pred_lr = self.other_clf_lr.predict(X_test)
self.accuracy_lr = accuracy_score(y_test, y_pred_lr) * 100
self.txtAccuracy_lr.setText(str(self.accuracy_lr))
filename3 = 'dt_finalized_model.sav'
self.other_clf_dt = pickle.load(open(filename3, 'rb'))
y_pred_dt = self.other_clf_dt.predict(X_test)
self.accuracy_dt = accuracy_score(y_test, y_pred_dt) * 100
self.txtAccuracy_dt.setText(str(self.accuracy_dt))
filename4 = 'gb_finalized_model.sav'
self.other_clf_gb = pickle.load(open(filename4, 'rb'))
y_pred_gb = self.other_clf_gb.predict(X_test)
self.accuracy_gb = accuracy_score(y_test, y_pred_gb) * 100
self.txtAccuracy_gb.setText(str(self.accuracy_gb))
class LogisticReg(QMainWindow):
    #::--------------------------------------------------------------------------------
    # Implementation of the Logistic Regression classifier using the happiness dataset
    # The methods in this class are:
    # __init__ : initializes the class
    # initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
    #          chosen by the user
    #::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(LogisticReg, self).__init__()
self.Title = "Logistic Regression Classifier"
self.initUi()
def initUi(self):
        #::-----------------------------------------------------------------
        # Create the canvas and all the elements of the dashboard used to
        # present the results from the algorithm.
        # The canvas is divided using a grid layout to facilitate the drawing
        # of the elements
        #::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Logistic Regression Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
        # Add the checkbox of each feature to the group-box layout
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_dt = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Decision Tree:', self.txtAccuracy_dt)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : k-fold Cross validation
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('K-fold cross validation')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
        '''
        Logistic Regression Classifier
        We populate the dashboard using the parameters chosen by the user.
        The parameters are processed to execute the scikit-learn Logistic Regression
        algorithm, then the results are presented as graphics and reports on the canvas.
        :return: None
        '''
# processing the parameters
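        # each checked feature contributes its column from the global df to self.list_corr_features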
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
                self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
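# Helper predicates that inspect a DataFrame's repr for info, truncated and expanded forms.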
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
                    # Beyond the max_columns boundary, but no expanded repr
                    # since the width is not exceeded
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
                    # exceeding the vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
        # exponent width is platform-dependent, as above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s =
|
Series(1, index=index)
|
pandas.Series
|
#!/usr/bin/env python
# coding: utf-8
# # This will create plots for institutions (universities) in THE WUR only, and for the period 2007-2017. The input dataset contains info on THE WUR universities only, but for any period of time.
# #### The unpaywall dump used was from (April or June) 2018; hence only analysis up to 2017 is included.
# ## Question: What is the distribution of incoming citation counts for OA and non-OA papers published by THE WUR universities within each country?
# In[1]:
# standard path wrangling to be able to import project config and sources
import os
import sys
from os.path import join
root = os.path.dirname(os.getcwd())
sys.path.append(root)
print('Project root: {}'.format(root))
# In[2]:
sys.path.append(join(root,"spark/shared/"))
from MAG_utils import *
# In[ ]:
# In[3]:
# Built-in
import json
# Installed
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import rc,rcParams
from matplotlib.patches import Rectangle
import unicodedata
import re
from statistics import mean
# In[4]:
cfg = None
with open(join(root,"spark/config.json")) as fp:
cfg = json.load(fp)
# In[5]:
# cfg
# In[6]:
cnames_for_plot = {
"austria" : "Austria",
"brazil" : "Brazil",
"germany" : "Germany",
"india" : "India",
"portugal" : "Portugal",
"russia" : "Russia",
"uk" : "UK",
"usa" : "USA"
}
# In[7]:
output_dir = join(root,"documents/analysis/dataset_selection_question5")
# In[ ]:
# Create the output directory for results (no error if it already exists)
os.makedirs(output_dir, exist_ok=True)
# In[8]:
study_years = [2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017]
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# # Extraction of Citation Counts of OA and unknown papers for each university
# In[9]:
def get_univ_papers_citation_counts(country_papers_OA_df, univs_name):
'''
Get the citation counts of OA and non-OA papers for each university in the input country
'''
univs_info = {}
univs_not_found = []
univs_found = []
for org_univ_name in set(univs_name): # remove duplicate univ names in the THE list, if any
# print(org_univ_name)
THE_univ_name_normalised = mag_normalisation_institution_names(org_univ_name)
'''
The dataframe that will be selected for the current univ is either :
1. When the MAG normalizedname column matches to THE_univ_name_normalised
or
2. When the MAG normalised(wikiname) matches to THE_univ_name_normalised -- this matches English names (in MAG wiki links as well as THE) of non English name (in MAG normalisedname or displayname) universities.
'''
univ_papers_df_set1 = country_papers_OA_df[country_papers_OA_df['normalizedname']==THE_univ_name_normalised]
univ_papers_df_set2 = country_papers_OA_df[country_papers_OA_df['normalizedwikiname']==THE_univ_name_normalised]
# The records in the two sets can be exactly the same
# Concat and remove exact duplicates -- https://stackoverflow.com/a/21317570/530399
univ_papers_df = pd.concat([univ_papers_df_set1, univ_papers_df_set2]).drop_duplicates().reset_index(drop=True)
# Put additional criteria that these papers are from 2007 till 2017
univ_papers_df = univ_papers_df[univ_papers_df['year'].isin(study_years)]
# The same paper would have multiple entries if multiple authors of that paper were from the same university.
# Deduplication is not necessary here because the input dataset was already prepared to exclude such duplicates.
# univ_papers_df = univ_papers_df.drop_duplicates(subset="paperid")
count_total_univ_papers = len(univ_papers_df)
# For universities whose names could not be matched, it is not fair to report an OA count of 0; they should be excluded from the graph.
if count_total_univ_papers==0:
univs_not_found.append(org_univ_name+" @ "+THE_univ_name_normalised)
else:
univs_found.append(org_univ_name)
univs_info[org_univ_name] = {}
OA_univ_papers_df = univ_papers_df[univ_papers_df['is_OA']=="true"] # stored as a string in csv
unknown_univ_papers_df = univ_papers_df[univ_papers_df['is_OA']!="true"] # stored as a string in csv
# Get the total count of citations for OA and unknown papers -- int casting needed to convert numpy int (json-incompatible) to python int
univs_info[org_univ_name]["citationcount_OA_papers"] = int(OA_univ_papers_df['citationcount'].sum())
univs_info[org_univ_name]["citationcount_unknown_papers"] = int(unknown_univ_papers_df['citationcount'].sum())
return univs_info, univs_not_found, univs_found
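# A minimal illustration (hypothetical university name) of the two matching routes described above:
# the THE name is normalised once, and a paper row is kept when either MAG's normalizedname or its
# normalised wiki name equals that normalised string.
# univs_info, univs_not_found, univs_found = get_univ_papers_citation_counts(
#     country_papers_OA_df, ["University of Vienna"])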
# In[10]:
all_countries_all_univs_OA_info = {}
all_countries_univs_found_not_found = {}
for country_name,univs_name in cfg['data']['all_THE_WUR_institutions_by_country'].items():
print("\nProcessing dataset of univs in "+country_name+"\n")
all_countries_univs_found_not_found[country_name] = {}
# The CSV has repeated header rows from merging multiple PySpark output partitions; hence the columns need to be read as strings first.
country_papers_OA_df = pd.read_csv(join(root,"data/processed/cc_oa_"+country_name+"_papers.csv"), header=0, sep=",", dtype={'is_OA': object, "url_lists_as_string": object, "year": object, "wikipage": object, "normalizedwikiname": object, "citationcount": object}) # object means string
# Then eliminate problematic lines
# temp fix until spark csv merge header issue is resolved -- the header line is present in each re-partition's output csv
country_papers_OA_df.drop(country_papers_OA_df[country_papers_OA_df.paperid == "paperid"].index, inplace=True)
# Then reset dtypes as needed.
country_papers_OA_df = country_papers_OA_df.astype({'year':int}) # todo : for other types too including is_OA and update the check method to boolean type
country_papers_OA_df = country_papers_OA_df.astype({'citationcount':int})
univs_info, univs_not_found, univs_found = get_univ_papers_citation_counts(country_papers_OA_df, univs_name)
all_countries_all_univs_OA_info[country_name] = univs_info
count_total_univs = len(univs_not_found) + len(univs_found)
not_found_details = {}
not_found_details['univ_names'] = univs_not_found
not_found_details['count_univs'] = len(univs_not_found)
not_found_details['percent_univs'] = (len(univs_not_found)*100.00)/count_total_univs
found_details = {}
found_details['univ_names'] = univs_found
found_details['count_univs'] = len(univs_found)
found_details['percent_univs'] = (len(univs_found)*100.00)/count_total_univs
all_details = {}
all_details['count_univs'] = count_total_univs
all_countries_univs_found_not_found[country_name]['not_found'] = not_found_details
all_countries_univs_found_not_found[country_name]['found'] = found_details
all_countries_univs_found_not_found[country_name]['all'] = all_details
print("Computed citation counts for all univs in "+country_name+"\n")
# In[11]:
# Write text files with the infos
with open(join(output_dir,'all_countries_univs_found_not_found.txt'), 'w') as file:
file.write(json.dumps(all_countries_univs_found_not_found, sort_keys=True, indent=4, ensure_ascii=False))
with open(join(output_dir,'all_countries_all_univs_cc_info.txt'), 'w') as file:
file.write(json.dumps(all_countries_all_univs_OA_info, sort_keys=True, indent=4, ensure_ascii=False))
# In[ ]:
# # Load data from previously saved files
# In[12]:
with open(join(output_dir,'all_countries_all_univs_cc_info.txt')) as file:
all_countries_all_univs_OA_info = json.load(file)
# all_countries_all_univs_OA_info
# # Create bar plot for each of the countries
# In[13]:
def label_bar_with_value(ax, rects, value_labels):
"""
Attach a text label above each bar displaying its height
"""
for i in range(len(rects)):
rect = rects[i]
label_value = value_labels[i]
ax.text(rect.get_x() + rect.get_width()/2., 1.05*rect.get_height(),
'%s' % label_value,
ha='center', va='bottom')
def create_citation_count_distribution_bar_chart(univs_details, save_fname, x_label, save_file=True):
# https://chrisalbon.com/python/data_visualization/matplotlib_grouped_bar_plot/
# https://stackoverflow.com/a/42498711/530399
univs_name = [x for x in univs_details.keys()]
univs_data = univs_details.values()
univs_oa_citation_counts = [x['citationcount_OA_papers'] for x in univs_data]
univs_unknown_citation_counts = [x['citationcount_unknown_papers'] for x in univs_data]
raw_data = {'univs_name': univs_name,
'univs_oa_citation_counts': univs_oa_citation_counts,
'univs_unknown_citation_counts': univs_unknown_citation_counts
}
df =
|
pd.DataFrame(raw_data, columns = ['univs_name', 'univs_oa_citation_counts', 'univs_unknown_citation_counts'])
|
pandas.DataFrame
|
import datetime
import functools
import os
from urllib.parse import urljoin
import arcgis
import geopandas
import numpy
import pandas
import requests
from airflow import DAG
from airflow.hooks.base_hook import BaseHook
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
from airflow.utils.email import send_email
from arcgis.gis import GIS
API_BASE_URL = "https://api2.gethelp.com/v1/"
FACILITIES_ID = "dd618cab800549358bac01bf218406e4"
STATS_ID = "9db2e26c98134fae9a6f5c154a1e9ac9"
TIMESERIES_ID = "bd17014f8a954681be8c383acdb6c808"
COUNCIL_DISTRICTS = (
"https://opendata.arcgis.com/datasets/"
"76104f230e384f38871eb3c4782f903d_13.geojson"
)
def download_council_districts():
r = requests.get(COUNCIL_DISTRICTS)
fname = "/tmp/council-districts.geojson"
with open(fname, "wb") as f:
f.write(r.content)
return fname
def coerce_integer(df):
"""
Loop through the columns of a df; if a column is numeric,
convert it to integer and fill NaNs with zeros.
This is somewhat heavy-handed in an attempt to force
Esri to recognize sparse columns as integers.
"""
# Numeric columns to not coerce to integer
EXCEPT = ["latitude", "longitude", "zipCode"]
def numeric_column_to_int(series):
return (
series.fillna(0).astype(int)
if pandas.api.types.is_numeric_dtype(series) and series.name not in EXCEPT
else series
)
return df.transform(numeric_column_to_int, axis=0)
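# A minimal sketch (hypothetical data) of what coerce_integer does: numeric columns other than
# latitude/longitude/zipCode get their NaNs filled with 0 and are cast to int; everything else passes through.
# example = pandas.DataFrame({"bedsTotal": [10.0, numpy.nan], "latitude": [34.05, 34.10]})
# coerce_integer(example)  # "bedsTotal" -> [10, 0] as int64; "latitude" stays float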
def upload_to_esri(df, layer_id, filename="/tmp/df.csv"):
"""
A quick helper function to upload a data frame
to ESRI as a feature-layer-backed CSV.
Recommended: no geometries, just lat/long columns.
Remember that ESRI timestamps are UTC only.
"""
df.to_csv(filename, index=False)
# Login to ArcGIS
arcconnection = BaseHook.get_connection("arcgis")
arcuser = arcconnection.login
arcpassword = arcconnection.password
gis = GIS("http://lahub.maps.arcgis.com", username=arcuser, password=arcpassword)
gis_item = gis.content.get(layer_id)
gis_layer_collection = arcgis.features.FeatureLayerCollection.fromitem(gis_item)
gis_layer_collection.manager.overwrite(filename)
os.remove(filename)
return True
def make_get_help_request(api_path, token, params={}, paginated=True):
"""
Makes an API request to the GetHelp platform.
Also handles depagination of long responses.
Parameters
==========
api_path: string
The path to query
token: string
The OAuth bearer token
params: dict
Any additional query parameters to pass
paginated: boolean
Whether the response is expected to be a list of paginated results
with a "content" field. In this case, the function will depaginate
the results. If false, it will return the raw JSON.
Returns
=======
The depaginated JSON response in the "content" field, or the raw JSON response.
"""
endpoint = urljoin(API_BASE_URL, api_path)
if paginated:
content = []
page = 0
while True:
r = requests.get(
endpoint,
headers={"Authorization": f"Bearer {token}"},
params=dict(page=page, **params),
)
res = r.json()
content = content + res["content"]
if res["last"] is True:
break
else:
page = page + 1
return content
else:
r = requests.get(
endpoint, headers={"Authorization": f"Bearer {token}"}, params=params,
)
return r.json()
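# A hedged usage sketch: the bearer token below is a placeholder, and the path is one this module
# already queries. With paginated=True the helper keeps requesting pages until the API reports
# "last": true, returning the concatenated "content" lists.
# facilities = make_get_help_request("facility-groups/1/facilities", "<oauth-token>")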
def get_facilities():
"""
Get the current facilities and their status.
Returns
=======
A dataframe with the current facilities.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request("facility-groups/1/facilities", TOKEN)
df = pandas.io.json.json_normalize(res)
df = pandas.concat(
[df, df.apply(lambda x: get_client_stats(x["id"]), axis=1)], axis=1,
)
df = pandas.concat(
[df, df.apply(lambda x: get_facility_program_status(x["id"]), axis=1)], axis=1,
)
council_districts = geopandas.read_file(
download_council_districts(), driver="GeoJSON"
)[["geometry", "District"]]
df = geopandas.GeoDataFrame(
df,
geometry=geopandas.points_from_xy(df.longitude, df.latitude),
crs={"init": "epsg:4326"},
)
df = df.assign(
district=df.apply(
lambda x: council_districts[council_districts.contains(x.geometry)]
.iloc[0]
.District,
axis=1,
)
).drop(columns=["geometry"])
return df
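# Note on the spatial join above: each facility point is assigned the council "District" whose
# polygon contains it, and the temporary geometry column is then dropped, leaving the derived
# district alongside the original latitude/longitude columns.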
def get_client_stats(facility_id):
"""
Given a facility ID, get the current client status.
Parameters
==========
facility_id: int
The facility ID
Returns
=======
A pandas.Series with the client statistics for the facility.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(
f"facilities/{facility_id}/client-statistics", TOKEN, paginated=False,
)
return (
pandas.Series({**res, **res["genderStats"], **res["clientEvents"]})
.drop(["genderStats", "clientEvents"])
.astype(int)
)
def get_program_client_stats(facility_id, program_id):
"""
Given a facility ID and a program ID, get the current client status.
Parameters
==========
facility_id: int
The facility ID
program_id: int
The program ID
Returns
=======
A pandas.Series with the client statistics for the facility program.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(
f"facilities/{facility_id}/facility-programs/{program_id}/client-statistics",
TOKEN,
paginated=False,
)
return (
pandas.Series({**res, **res["genderStats"], **res["clientEvents"]})
.drop(["genderStats", "clientEvents"])
.astype(int)
)
def agg_facility_programs(facility_id, program_list, match, prefix):
"""
Aggregate the current bed occupancy data for a list of programs,
filtering by program name.
Parameters
==========
facility_id: int
The facility id.
program_list: list
A list of programs of the shape returned by the GetHelp
facility-programs endpoint.
match: str
A string which is tested for inclusion in a program name
to decide whether to include a program in the statistics.
prefix: str
A string to prefix series labels with.
Returns
=======
A pandas.Series with the aggregated statistics for the matching facility programs.
"""
# A sentinel timestamp which is used to determine whether
# any programs actually matched.
sentinel = pandas.Timestamp("2020-01-01T00:00:00Z")
last_updated = functools.reduce(
lambda x, y: (
max(x, pandas.Timestamp(y["lastUpdated"]))
if match in y["name"].lower()
else x
),
program_list,
sentinel,
)
if last_updated == sentinel:
# No programs matched, return early
return None
occupied = functools.reduce(
lambda x, y: x
+ (y["bedsOccupied"] + y["bedsPending"] if match in y["name"].lower() else 0),
program_list,
0,
)
total = functools.reduce(
lambda x, y: x + (y["bedsTotal"] if match in y["name"].lower() else 0),
program_list,
0,
)
available = total - occupied
client_stats = functools.reduce(
lambda x, y: x.add(
get_program_client_stats(facility_id, y["id"]), fill_value=0,
)
if match in y["name"].lower()
else x,
program_list,
pandas.Series(),
)
return pandas.Series(
{
prefix + "occupied": occupied,
prefix + "available": available,
prefix + "last_updated": last_updated,
}
).append(client_stats.rename(lambda x: prefix + x))
def get_facility_program_status(facility_id):
"""
Get the most recent status for a facility, broken
up into shelter beds, trailers, and safe parking.
Parameters
==========
facility_id: int
The facility ID.
Returns
=======
A pandas.Series with program statistics for shelter beds, safe
parking, and trailer beds.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
res = make_get_help_request(f"facilities/{facility_id}/facility-programs", TOKEN)
shelter_beds = agg_facility_programs(
facility_id, res, "shelter bed", "shelter_beds_"
)
isolation = agg_facility_programs(facility_id, res, "isolation", "isolation_")
trailers = agg_facility_programs(facility_id, res, "trailer", "trailers_")
safe_parking = agg_facility_programs(facility_id, res, "parking", "safe_parking_")
return pandas.concat([shelter_beds, isolation, trailers, safe_parking])
def get_facility_history(facility_id, start_date=None, end_date=None):
"""
Get the history stats of a given facility by ID.
Parameters
==========
facility_id: int
The ID of the facility.
start_date: datetime.date
The start date of the history (defaults to April 8, 2020)
end_date: datetime.date
The end date of the history (defaults to the present day)
Returns
=======
A dataframe with the history for the given facility.
"""
TOKEN = Variable.get("GETHELP_OAUTH_PASSWORD")
start_date = start_date or datetime.date(2020, 4, 8)
end_date = end_date or pandas.Timestamp.now(tz="US/Pacific").date()
# Get the shelter bed program ID
res = make_get_help_request(f"facilities/{facility_id}/facility-programs", TOKEN)
programs = pandas.io.json.json_normalize(res)
history = pandas.DataFrame()
if not len(programs):
return history
# Get the history stats for the shelter bed programs
for _, program in programs.iterrows():
program_id = program["id"]
res = make_get_help_request(
f"facilities/{facility_id}/facility-programs/{program_id}/statistics",
TOKEN,
params={"startDate": str(start_date), "endDate": str(end_date)},
)
program_history =
|
pandas.io.json.json_normalize(res)
|
pandas.io.json.json_normalize
|
# --------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Code starts here
df =
|
pd.read_csv(path)
|
pandas.read_csv
|
#!/usr/bin/env python
import logging
import aiohttp
import asyncio
from tqdm.asyncio import tqdm_asyncio
from tqdm.contrib.logging import logging_redirect_tqdm
import pandas as pd
import numpy as np
import time
import datetime as dt
from typing import Collection, Dict, List, Optional, Tuple, Union
from yahoo_finance import _download_single_ticker_chart_data, download_ticker_sector_industry
logger = logging.getLogger(__name__)
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
async def download_tickers_sector_industry(tickers: List[str]) -> pd.DataFrame:
async with aiohttp.ClientSession(headers=HEADERS) as session:
print("\nDownloading stock industry and sector")
with logging_redirect_tqdm():
tickers_info = await tqdm_asyncio.gather(
*[download_ticker_sector_industry(session, ticker) for ticker in tickers]
)
if None in tickers_info:
errored_tickers = [ticker for ticker, ticker_info in zip(tickers, tickers_info) if ticker_info is None]
tickers_info = [ticker_info for ticker_info in tickers_info if ticker_info is not None]
print(f"Out of {len(tickers)} tickers missing info, we could get {len(tickers_info)}")
print(f"Couldn't get info for the following {len(errored_tickers)}: {', '.join(errored_tickers)}")
return pd.DataFrame(tickers_info, columns=["SYMBOL", "SECTOR", "INDUSTRY"])
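# A minimal usage sketch (assumes no event loop is already running, e.g. in a plain script):
# import asyncio
# sector_industry_df = asyncio.run(download_tickers_sector_industry(["AAPL", "MSFT"]))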
async def download_tickers_quotes(
tickers: List[str], start_date: int, end_date: int, interval: str
) -> Tuple[pd.DataFrame, Dict]:
"""Download quotes and their currencies for all the specified tickers in the specified time window.
Parameters
----------
tickers : List[str]
The list of tickers to download data for
start_date : int
The start date in POSIX format.
end_date : int
The end date in POSIX format.
interval : str
The interval between each data point (e.g. "1d")
Returns
-------
Tuple[List[Dict], Dict]
A tuple containing two dicts: first the quotes, second their currencies.
"""
async with aiohttp.ClientSession(headers=HEADERS) as session:
print("\nDownloading stock quotes")
with logging_redirect_tqdm():
tickers_chart_data = await tqdm_asyncio.gather(
*[
_download_single_ticker_chart_data(session, ticker, start_date, end_date, interval)
for ticker in tickers
]
)
if None in tickers_chart_data:
errored_tickers = [ticker for ticker, ticker_info in zip(tickers, tickers_chart_data) if ticker_info is None]
tickers_chart_data = [t for t in tickers_chart_data if t is not None]
print(f"Out of {len(tickers)} tickers, we could get quotes for {len(tickers_chart_data)}")
print(f"Couldn't get quotes for: {', '.join(errored_tickers)}")
quotes = {ticker_dict["ticker"]: ticker_dict["quotes"] for ticker_dict in tickers_chart_data}
currencies = {ticker_dict["ticker"]: ticker_dict["currency"] for ticker_dict in tickers_chart_data}
return pd.concat(quotes, axis="columns", sort=True), currencies
def extract_ticker_list(tickers: Union[Collection[str], str]) -> List[str]:
if isinstance(tickers, (list, set, tuple)):
pass
elif isinstance(tickers, str):
# Replacing commas with spaces helps remove any excess spaces between commas
tickers = tickers.replace(",", " ").split()
else:
raise ValueError("tickers must be a str consisting of a comma separated list of tickers or a list of tickers")
return list(set([ticker.upper() for ticker in tickers]))
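# A small illustration of the accepted inputs; output order is not guaranteed because
# duplicates are removed via a set:
# extract_ticker_list("aapl, msft aapl")   # -> ["AAPL", "MSFT"] in some order
# extract_ticker_list(["goog", "GOOG"])    # -> ["GOOG"]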
def parse_start_end_date(
start_date: Optional[str] = None, end_date: Optional[str] = None, default_start_days_ago=365
) -> Tuple[int, int]:
end_date = int(time.time()) if end_date is None else int(dt.datetime.strptime(end_date, "%Y-%m-%d").timestamp())
start_date = (
int((dt.datetime.today() - dt.timedelta(default_start_days_ago)).timestamp())
if start_date is None
else int(dt.datetime.strptime(start_date, "%Y-%m-%d").timestamp())
)
return start_date, end_date
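# A hedged example: both bounds come back as POSIX timestamps (ints); omitted bounds default to
# roughly one year ago and "now" respectively.
# start_ts, end_ts = parse_start_end_date("2021-01-01", "2021-12-31")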
def download_tickers_info(
tickers: list, start_date: Optional[str] = None, end_date: Optional[str] = None, interval: str = "1d"
) -> dict:
"""
Download historical data for tickers in the list.
Parameters
----------
tickers: list
Tickers for which to download historical information.
start_date: str, optional
Start downloading data from this date (format "%Y-%m-%d").
end_date: str, optional
End downloading data at this date (format "%Y-%m-%d").
interval: str
Frequency between data points (e.g. "1d").
Returns
-------
data: dict
Dictionary including the following keys:
- tickers: list of tickers
- logp: array of log-adjusted closing prices, shape=(num stocks, length period);
- volume: array of volumes, shape=(num stocks, length period);
- sectors: dictionary of stock sector for each ticker;
- industries: dictionary of stock industry for each ticker.
"""
logger.info(f"Downloading data for {len(tickers)} tickers")
tickers = extract_ticker_list(tickers)
stock_info_filename = "stock_info.csv"
try:
stock_info_df = pd.read_csv(stock_info_filename)
logger.info(f"Reading stock info found in file '{stock_info_filename}'")
except FileNotFoundError:
# Creating an empty dataframe
stock_info_columns = ["SYMBOL", "CURRENCY", "SECTOR", "INDUSTRY"]
stock_info_df =
|
pd.DataFrame(columns=stock_info_columns)
|
pandas.DataFrame
|
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize("vals", [
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
res2 = arr.get_values()
tm.assert_numpy_array_equal(res2, vals)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.to_dense()[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.to_dense()[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.to_dense()[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.to_dense()[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = pd.SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul",
"truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
op = getattr(operator, op)
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.to_dense(), second.to_dense()),
fill_value=first.fill_value)
assert isinstance(res, SparseArray)
assert_almost_equal(res.to_dense(), exp.to_dense())
res2 = op(first, second.to_dense())
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.to_dense(), second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.to_dense(), 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.to_dense(), exp)
with np.errstate(all="ignore"):
for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
_check_op(op, first_arr, second_arr)
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='always',
category=DeprecationWarning)
warnings.filterwarnings(action='always',
category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
# float dtype's fill_value is np.nan, replaced by -1
s = SparseArray([0., 0., 0., 0.])
res = s.fillna(-1)
exp = SparseArray([0., 0., 0., 0.], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
# int dtype shouldn't have missing. No changes.
s = SparseArray([0, 0, 0, 0])
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
tm.assert_sp_array_equal(res, s)
s = SparseArray([0, 0, 0, 0], fill_value=0)
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
# fill_value can be nan if there is no missing hole.
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)
assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
def test_nonzero(self):
# Tests regression #21172.
sa = pd.SparseArray([
float('nan'),
float('nan'),
1, 0, 0,
2, 0, 0, 0,
3, 0, 0
])
expected = np.array([2, 5, 9], dtype=np.int32)
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
sa = pd.SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
class TestSparseArrayAnalytics:
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
def test_all(self, data, pos, neg):
# GH 17570
out = SparseArray(data).all()
assert out
out = SparseArray(data, fill_value=pos).all()
assert out
data[1] = neg
out = SparseArray(data).all()
assert not out
out = SparseArray(data, fill_value=pos).all()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0)
])
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_all(self, data, pos, neg):
# GH 17570
out = np.all(SparseArray(data))
assert out
out = np.all(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.all(SparseArray(data))
assert not out
out = np.all(SparseArray(data, fill_value=pos))
assert not out
# raises with a different message on py2.
msg = "the \'out\' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.all(SparseArray(data), out=np.array([]))
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
def test_any(self, data, pos, neg):
# GH 17570
out = SparseArray(data).any()
assert out
out = SparseArray(data, fill_value=pos).any()
assert out
data[1] = neg
out = SparseArray(data).any()
assert not out
out = SparseArray(data, fill_value=pos).any()
assert not out
@pytest.mark.parametrize('data,pos,neg', [
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0)
])
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_any(self, data, pos, neg):
# GH 17570
out = np.any(SparseArray(data))
assert out
out = np.any(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.any(SparseArray(data))
assert not out
out = np.any(SparseArray(data, fill_value=pos))
assert not out
msg = "the \'out\' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.any(SparseArray(data), out=out)
def test_sum(self):
data = np.arange(10).astype(float)
out = SparseArray(data).sum()
assert out == 45.0
data[5] = np.nan
out = SparseArray(data, fill_value=2).sum()
assert out == 40.0
out = SparseArray(data, fill_value=np.nan).sum()
assert out == 40.0
def test_numpy_sum(self):
data = np.arange(10).astype(float)
out = np.sum(SparseArray(data))
assert out == 45.0
data[5] = np.nan
out = np.sum(SparseArray(data, fill_value=2))
assert out == 40.0
out = np.sum(SparseArray(data, fill_value=np.nan))
assert out == 40.0
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), out=out)
@pytest.mark.parametrize("data,expected", [
(np.array([1, 2, 3, 4, 5], dtype=float), # non-null data
SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0]))),
(np.array([1, 2, np.nan, 4, 5], dtype=float), # null data
SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])))
])
@pytest.mark.parametrize("numpy", [True, False])
def test_cumsum(self, data, expected, numpy):
cumsum = np.cumsum if numpy else lambda s: s.cumsum()
out = cumsum(SparseArray(data))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=np.nan))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=2))
tm.assert_sp_array_equal(out, expected)
if numpy: # numpy compatibility checks.
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), out=out)
else:
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
with pytest.raises(ValueError, match=msg):
SparseArray(data).cumsum(axis=axis)
def test_mean(self):
data = np.arange(10).astype(float)
out = SparseArray(data).mean()
assert out == 4.5
data[5] = np.nan
out = SparseArray(data).mean()
assert out == 40.0 / 9
def test_numpy_mean(self):
data = np.arange(10).astype(float)
out = np.mean(SparseArray(data))
assert out == 4.5
data[5] = np.nan
out = np.mean(SparseArray(data))
assert out == 40.0 / 9
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), out=out)
def test_ufunc(self):
# GH 13853 make sure ufunc is applied to fill_value
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([1, np.nan, 2, np.nan, 2])
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
tm.assert_sp_array_equal(np.sin(sparse), result)
def test_ufunc_args(self):
# GH 13853 make sure ufunc is applied to fill_value, including its arg
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([2, np.nan, 3, np.nan, -1])
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([2, 0, 3, -1], fill_value=2)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray([2, 0, 1, -1], fill_value=1)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
@pytest.mark.parametrize('fill_value', [0.0, np.nan])
def test_modf(self, fill_value):
# https://github.com/pandas-dev/pandas/issues/26946
sparse = pd.SparseArray([fill_value] * 10 + [1.1, 2.2],
fill_value=fill_value)
r1, r2 = np.modf(sparse)
e1, e2 = np.modf(np.asarray(sparse))
tm.assert_sp_array_equal(r1, pd.SparseArray(e1, fill_value=fill_value))
tm.assert_sp_array_equal(r2, pd.SparseArray(e2, fill_value=fill_value))
def test_nbytes_integer(self):
arr = SparseArray([1, 0, 0, 0, 2], kind='integer')
result = arr.nbytes
# (2 * 8) + 2 * 4
assert result == 24
def test_nbytes_block(self):
arr = SparseArray([1, 2, 0, 0, 0], kind='block')
result = arr.nbytes
# (2 * 8) + 4 + 4
        # sp_values, blocs, blengths
assert result == 24
def test_asarray_datetime64(self):
s = pd.SparseArray(
pd.to_datetime(['2012', None, None, '2013'])
)
np.asarray(s)
def test_density(self):
arr = SparseArray([0, 1])
assert arr.density == 0.5
def test_npoints(self):
arr = SparseArray([0, 1])
assert arr.npoints == 1
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestAccessor:
@pytest.mark.parametrize('attr', [
'npoints', 'density', 'fill_value', 'sp_values',
])
def test_get_attributes(self, attr):
arr = SparseArray([0, 1])
ser = pd.Series(arr)
result = getattr(ser.sparse, attr)
expected = getattr(arr, attr)
assert result == expected
@td.skip_if_no_scipy
def test_from_coo(self):
import scipy.sparse
row = [0, 3, 1, 0]
col = [0, 3, 1, 2]
data = [4, 5, 7, 9]
sp_array = scipy.sparse.coo_matrix((data, (row, col)))
result = pd.Series.sparse.from_coo(sp_array)
index = pd.MultiIndex.from_arrays([[0, 0, 1, 3], [0, 2, 1, 3]])
expected = pd.Series([4, 9, 7, 5], index=index, dtype='Sparse[int]')
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_to_coo(self):
import scipy.sparse
ser = pd.Series([1, 2, 3],
index=pd.MultiIndex.from_product([[0], [1, 2, 3]],
names=['a', 'b']),
dtype='Sparse[int]')
A, _, _ = ser.sparse.to_coo()
assert isinstance(A, scipy.sparse.coo.coo_matrix)
def test_non_sparse_raises(self):
ser = pd.Series([1, 2, 3])
with pytest.raises(AttributeError, match='.sparse'):
ser.sparse.density
def test_setting_fill_value_fillna_still_works():
# This is why letting users update fill_value / dtype is bad
# astype has the same problem.
arr = SparseArray([1., np.nan, 1.0], fill_value=0.0)
arr.fill_value = np.nan
result = arr.isna()
# Can't do direct comparison, since the sp_index will be different
# So let's convert to ndarray and check there.
result = np.asarray(result)
expected = np.array([False, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_setting_fill_value_updates():
arr = SparseArray([0.0, np.nan], fill_value=0)
arr.fill_value = np.nan
# use private constructor to get the index right
# otherwise both nans would be un-stored.
expected = SparseArray._simple_new(
sparse_array=np.array([np.nan]),
sparse_index=IntIndex(2, [1]),
dtype=SparseDtype(float, np.nan),
)
tm.assert_sp_array_equal(arr, expected)
@pytest.mark.parametrize("arr, loc", [
([None, 1, 2], 0),
([0, None, 2], 1),
([0, 1, None], 2),
([0, 1, 1, None, None], 3),
([1, 1, 1, 2], -1),
([], -1),
])
def test_first_fill_value_loc(arr, loc):
result = SparseArray(arr)._first_fill_value_loc()
assert result == loc
@pytest.mark.parametrize('arr', [
[1, 2, np.nan, np.nan],
[1, np.nan, 2, np.nan],
[1, 2, np.nan],
])
@pytest.mark.parametrize("fill_value", [
np.nan, 0, 1
])
def test_unique_na_fill(arr, fill_value):
a = pd.SparseArray(arr, fill_value=fill_value).unique()
b = pd.Series(arr).unique()
assert isinstance(a, SparseArray)
a = np.asarray(a)
|
tm.assert_numpy_array_equal(a, b)
|
pandas.util.testing.assert_numpy_array_equal
|
import pandas as pd
melb_df =
|
pd.read_csv('data/melb_data_fe.csv')
|
pandas.read_csv
|
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
    filename_different = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
        filename_different,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
    g = move_df.to_grid(8)
    assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
|
assert_frame_equal(move_df, expected)
|
pandas.testing.assert_frame_equal
|
import argparse
import os
import pickle
import sys
import warnings
import numpy as np
import pandas as pd
from data_conversion.data_filtering import get_refined_filtered_data
from features.build_features import build_features
from helpers.utils import output_file
from models.train_predict_model import train_val_predict_model
warnings.filterwarnings("ignore")
sys.path.append("..")
def extract_features(data, labels):
print('Extracting features.')
return build_features(data), labels.values.flatten()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Mycroft Semantic Type detection')
parser.add_argument('--input_data', '-i', default='sherlock', type=str,
help="Choose the type of data (options: sherlock, mycroft)")
parser.add_argument('--extract', '-e', default=False, type=bool,
help="Choose if you want to generate features or not")
parser.add_argument('--split', '-spt', default=False, type=bool,
help="Choose if you want to split the data or not")
    parser.add_argument('--train_split', '-ts', default=0.7, type=float,
                        help="Choose the percentage of the train data split (e.g. 0.7 -> 70%% train)")
    parser.add_argument('--no_of_tables', '-num', default=5000, type=int,
                        help="Choose the file with the number of tables required for processing (options: 5000, "
                             "10000, 100000, 500000)")
parser.add_argument('--sample', '-smp', default=False, type=bool,
help="Choose if you want to use sample or not")
args = parser.parse_args()
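    # Illustrative invocations (the script name and working directory are assumptions, not
    # taken from the original repo):
    #   python main.py --input_data sherlock --extract True
    #   python main.py --input_data mycroft --no_of_tables 5000 --sample True
    # Note: with type=bool, argparse converts any non-empty string (even "False") to True,
    # so these options effectively act as on/off switches whenever a value is supplied.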
if args.input_data == 'sherlock':
data = pd.read_csv('../resources/data/sherlock/raw/test_values.csv', sep=',', index_col=0, header=None)
labels = pd.read_csv('../resources/data/sherlock/raw/test_labels.csv', sep=',', index_col=0, header=None)
data.head()
labels.head()
label_categories = len(np.unique(np.concatenate(labels.values)))
if args.extract:
X, Y = extract_features(data, labels)
print('Features extracted')
else:
# Load pre-extracted features of sample file
with open('../resources/data/sherlock/processed/X_train.data', 'rb') as f:
X = pickle.load(f)
with open('../resources/data/sherlock/processed/y_train.data', 'rb') as f:
Y = pickle.load(f)
elif args.input_data == 'mycroft':
if not args.sample:
if not os.path.exists(os.path.normpath(
os.path.join(os.path.dirname(__file__), '..', 'resources', 'output',
'mycroft_{}_tables.csv'.format(args.no_of_tables)))):
exit("Please generate the mycroft data with no of tables: {} :)".format(args.no_of_tables))
if args.sample:
input_data = pd.read_csv("../resources/output/sample_with_filter.csv".format(args.no_of_tables),
names=["csv_data"])
else:
input_data = pd.read_csv("../resources/output/mycroft_{}_tables.csv".format(args.no_of_tables),
names=["csv_data"])
filtered_data = get_refined_filtered_data(input_data)
data =
|
pd.DataFrame(filtered_data['data'])
|
pandas.DataFrame
|
# coding: utf-8
# In[1]:
# Fundamentals of Data Science Project
# Part created by <NAME>:
# Experimented with IMDB score prediction using the features built for the gross income prediction feature generation stage
# Dataset: https://www.kaggle.com/deepmatrix/imdb-5000-movie-dataset
# Features:
# Numerical features: actor1 Facebook likes, actor2 Facebook likes, actor3 Facebook likes, director Facebook likes, budget.
# Text features (converted to categorical data): actor1 name, actor2 name, actor3 name, director name, country, content rating, language
# Preprocessing -
# Text features:
# - Text data such as the top 3 actor names, director names, content rating, country and language have been treated as categories
# - Categorical data has been label-encoded and binarized for each feature column (each item in a feature column gets a unique label and a binary representation)
# Numerical features:
# - Numerical data have been min-max scaled by fitting a MinMaxScaler
# - Rows with a missing gross value or any empty major feature have been eliminated
# - Numerical, categorical and text data have been preprocessed specifically for gross prediction, with categorical data in mind
# Both numerical and text data have been used for gross prediction/regression.
# Regression models: Random Forest Regression and Decision Tree Regression.
# Other models tried: SVR
# Evaluation:
# - 5-Fold Cross Validation
# - Evaluation metrics: Mean Absolute Error, Mean Squared Error, Median Absolute Error
# - Others tried: Explained Variance Score and R^2 score have also been calculated
# Visualization:
# - Mean gross by actor1, actor2, actor3, director, country, content rating and language has been visualized.
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import tree
from sklearn import linear_model
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn import metrics
from sklearn.metrics import mean_absolute_error, mean_squared_error, median_absolute_error, explained_variance_score, r2_score
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, HashingVectorizer
# In[3]:
global minval
global maxval
global min_max_scaler
global catagory_features
global number_features
min_max_scaler = preprocessing.MinMaxScaler()
text_features = ['genre', 'plot_keywords', 'movie_title']
catagory_features = ['actor_1_name', 'actor_2_name', 'actor_3_name', 'director_name', 'country', 'content_rating', 'language']
number_features = ['actor_1_facebook_likes', 'actor_2_facebook_likes', 'actor_3_facebook_likes', 'director_facebook_likes','cast_total_facebook_likes','budget', 'gross']
all_selected_features = ['actor_1_name', 'actor_2_name', 'actor_3_name', 'director_name', 'country', 'content_rating', 'language', 'actor_1_facebook_likes', 'actor_2_facebook_likes', 'actor_3_facebook_likes', 'director_facebook_likes','cast_total_facebook_likes','budget', 'gross', 'genres', "imdb_score"]
eliminate_if_empty_list = ['actor_1_name', 'actor_2_name', 'director_name', 'country', 'actor_1_facebook_likes', 'actor_2_facebook_likes', 'director_facebook_likes','cast_total_facebook_likes', 'gross', "imdb_score"]
#preprocessing
def data_clean(path):
read_data = pd.read_csv(path)
select_data = read_data[all_selected_features]
data = select_data.dropna(axis = 0, how = 'any', subset = eliminate_if_empty_list)
data = data.reset_index(drop = True)
for x in catagory_features:
data[x] = data[x].fillna('None').astype('category')
    for y in number_features:
        data[y] = data[y].fillna(0.0).astype(float)
return data
def preprocessing_numerical_minmax(data):
global min_max_scaler
scaled_data = min_max_scaler.fit_transform(data)
return scaled_data
def preprocessing_categorical(data):
label_encoder = LabelEncoder()
label_encoded_data = label_encoder.fit_transform(data)
label_binarizer = preprocessing.LabelBinarizer()
label_binarized_data = label_binarizer.fit_transform(label_encoded_data)
return label_binarized_data
def preprocessing_text(data):
tfidf_vectorizer = TfidfVectorizer()
tfidf_vectorized_text = tfidf_vectorizer.fit_transform(data)
return tfidf_vectorized_text
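# Minimal usage sketch of the preprocessing helpers above on made-up inputs (illustrative
# only: this function is never called by the pipeline and the toy values are assumptions).
def _demo_preprocessing():
    toy_budget = np.array([[1.0e6], [5.0e6], [2.0e7]])
    scaled = preprocessing_numerical_minmax(toy_budget)             # values scaled into [0, 1]
    binarized = preprocessing_categorical(['PG-13', 'R', 'PG-13'])  # label-encoded, then binarized
    tfidf = preprocessing_text(['action adventure', 'comedy'])      # sparse TF-IDF matrix
    return scaled, binarized, tfidf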
#regression model training
def regression_without_cross_validation(model, train_data, train_target, test_data):
model.fit(train_data, train_target)
prediction = model.predict(test_data)
return prediction
def regression_with_cross_validation(model, data, target, n_fold, model_name, pred_type):
print(pred_type, " (Regression Model: ", model_name)
cross_val_score_mean_abs_err = cross_val_score(model, data, target, scoring = 'mean_absolute_error', cv = n_fold)
print("\nCross Validation Score (Mean Absolute Error) : \n", -cross_val_score_mean_abs_err)
print("\nCross Validation Score (Mean Absolute Error) (Mean) : \n", -cross_val_score_mean_abs_err.mean())
cross_val_score_mean_sqr_err = cross_val_score(model, data, target, scoring = 'mean_squared_error', cv = n_fold)
print("\nCross Validation Score (Mean Squared Error) : \n", -cross_val_score_mean_sqr_err)
print("\nCross Validation Score (Mean Squared Error) (Mean) : \n", -cross_val_score_mean_sqr_err.mean())
def regression_scores(original_val, predicted_val, model_name):
print("Regression Model Name: ", model_name)
mean_abs_error = mean_absolute_error(original_val, predicted_val)
mean_sqr_error = mean_squared_error(original_val, predicted_val)
median_abs_error = median_absolute_error(original_val, predicted_val)
explained_var_score = explained_variance_score(original_val, predicted_val)
r2__score = r2_score(original_val, predicted_val)
print("\n")
print("\nRegression Scores(train_test_split):\n")
print("Mean Absolute Error :", mean_abs_error)
print("Mean Squared Error :", mean_sqr_error)
print("Median Absolute Error :", median_abs_error)
print("Explained Var Score :", explained_var_score)
print("R^2 Score :", r2__score)
print("\n\n")
#simple task
def inverse_scaling(scaled_val):
unscaled_val = min_max_scaler.inverse_transform(scaled_val)
return unscaled_val
def roundval(value):
return value.round()
def to_millions(value):
return value / 10000000
#evaluation
#plotting actual vs predicted for all data
def prediction_performance_plot(original_val, predicted_val, model_name, start, end, n, plot_type, prediction_type):
#inverse transform and convert to millions
original_val = to_millions(inverse_scaling(original_val))
predicted_val = to_millions(inverse_scaling(predicted_val))
print("\n")
plt.title("\n"+ prediction_type + " Prediction Performance using " + model_name + "(Actual VS Predicted)"+plot_type + "\n")
if plot_type=="all":
plt.plot(original_val, c = 'g', label = "Actual")
plt.plot(predicted_val, c = 'b', label = "Prediction")
if plot_type=="seq":
plt.plot(original_val[start : end + 1], c = 'g', label = "Actual")
plt.plot(predicted_val[start : end + 1], c = 'b', label = "Prediction")
if plot_type=="random":
original_val_list = []
predicted_val_list = []
for k in range(n):
i = random.randint(0, len(predicted_val) - 1)
original_val_list.append(original_val[i])
predicted_val_list.append(predicted_val[i])
plt.plot(original_val_list, c = 'g', label = "Actual")
plt.plot(predicted_val_list, c = 'b', label = "Prediction")
plt.legend(["Actual", "Predicted"], loc = 'center left', bbox_to_anchor = (1, 0.8))
plt.ylabel('Prediction (In Millions)', fontsize = 14)
plt.grid()
plt.show()
#printing actual vs predicted in a range
def print_original_vs_predicted(original_val, predicted_val, i, j, n, model_name, print_type, prediction_type):
#inverse transform and convert to millions
original_val = to_millions(inverse_scaling(original_val))
predicted_val = to_millions(inverse_scaling(predicted_val))
print("\n"+prediction_type + " Comparision of Actual VS Predicted"+print_type+"\n")
if print_type=="seq":
if j<len(predicted_val):
for k in range(i, j + 1):
print("Actual" + prediction_type+" : ", original_val[k], ", Predicted " +prediction_type," : ", predicted_val[k])
if print_type=="random":
for k in range(n):
i = random.randint(0, len(predicted_val) - 1)
print("Actual ", prediction_type, " : ", original_val[i], ", Predicted " +prediction_type+" : ", predicted_val[i])
#plotting actual vs predicted in a randomly using a bar chart
def bar_plot_original_vs_predicted_rand(original_val, predicted_val, n, model_name, pred_type):
#inverse transform and convert to millions
original_val = to_millions(inverse_scaling(original_val))
predicted_val = to_millions(inverse_scaling(predicted_val))
original_val_list = []
predicted_val_list = []
for k in range(n):
i = random.randint(0, len(predicted_val) - 1)
original_val_list.append(original_val[i])
predicted_val_list.append(predicted_val[i])
original_val_df =
|
pd.DataFrame(original_val_list)
|
pandas.DataFrame
|
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only: input all files (.bam and .fa), output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import os
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message),
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin,complete,w):
count=0
tot = 0
meth=methbin.iloc[:,methbin.columns!='Qname']
if len(meth.columns)>=w:
for i in range(len(meth.columns)-w+1):
# extract a window
temp = meth.iloc[:,i:i+w].copy()
#print(temp)
tot = tot+1
if (enough_reads(window=temp,complete=complete,w=w)):
count=count+1
#toprint=temp.notnull().sum(axis=1)>=w
#print(toprint.sum())
#print(count)
#print(tot)
return count/tot*100
else:
return 0
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=2**w
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
return temp.sum()>=2**(w-2) and tempw1.sum()>0
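# Illustrative check of the thresholds above (toy data, never called by the pipeline):
# for w = 3, imputation needs at least 2**(3-2) = 2 fully observed reads plus one read
# missing exactly one CpG, while heterogeneity estimation would need 2**3 = 8 complete reads.
def _demo_enough_reads():
    toy = pd.DataFrame([[1, 0, 1],
                        [0, 0, 1],
                        [1, np.nan, 1]], dtype=float)
    return enough_reads(window=toy, w=3, complete=False)  # True under the imputation criterion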
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def getcomplete(window,w):
temp=np.isnan(window).sum(axis=1)==0
mat=window[np.where(temp)[0],:]
#temp=window.notnull().sum(axis=1)>=w
#mat=window.iloc[np.where(temp)[0],:]
#else:
# temp=mat.notnull().sum(axis=1)>=w-1
return mat
def PattoDis(mat,dist=1):
s=mat.shape[0]
dis=np.zeros((s,s))
for i in range(s):
for j in range(s):
if j<i:
if dist==1:
d=Ham_d(mat.iloc[i,],mat.iloc[j,])
else:
d=WDK_d(mat.iloc[i,],mat.iloc[j,])
dis[i,j]=dis[j,i]=d
return dis
def Ham_d(pat1,pat2):
return (pat1!=pat2).sum()
def WDK_d(pat1,pat2):
d=0
w=pat1.shape[0]
for i in range(w): # k-1
for j in range(w-i): # starting pos
s=(w-i-1)*(1-np.all(pat1[j:j+i+1]==pat2[j:j+i+1]))
d+=s
return d
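# Worked example (illustrative, not part of the pipeline): for pat1 = [1, 0, 1] and
# pat2 = [1, 1, 1] with w = 3, Ham_d counts the single mismatching CpG, while WDK_d also
# weights every substring covering it: 2 for the length-1 mismatch plus 1 + 1 for the two
# length-2 substrings, giving 4.
def _demo_distances():
    pat1 = np.array([1, 0, 1])
    pat2 = np.array([1, 1, 1])
    assert Ham_d(pat1, pat2) == 1
    assert WDK_d(pat1, pat2) == 4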
# input a window of w CGs and output a list of proportions with starting genomic location and genomic distance across
def window_summ(pat,start,dis,chrom):
m=np.shape(pat)[0]
d=np.shape(pat)[1]
all_pos=np.zeros((2**d,d))
for i in range(d):
all_pos[:,i]=np.linspace(0,2**d-1,2**d)%(2**(i+1))//(2**i)
#print(all_pos)
prob=np.zeros((2**d,1))
#print(prob)
for i in range(2**d):
count = 0
for j in range(m):
if (all_pos[i,:]==pat.iloc[j,:]).sum()==d:
count += 1
#print(count)
prob[i]=count
if d==3:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'dis':dis})
if d==4:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'dis':dis})
if d==5:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'dis':dis})
if d==6:
out=pd.DataFrame({'chrom':chrom,'pos':start,'p01':prob[0],'p02':prob[1],'p03':prob[2],'p04':prob[3],\
'p05':prob[4],'p06':prob[5],'p07':prob[6],'p08':prob[7],'p09':prob[8],'p10':prob[9],\
'p11':prob[10],'p12':prob[11],'p13':prob[12],'p14':prob[13],'p15':prob[14],\
'p16':prob[15],'p17':prob[16],'p18':prob[17],'p19':prob[18],'p20':prob[19],\
'p21':prob[20],'p22':prob[21],'p23':prob[22],'p24':prob[23],'p25':prob[24],\
'p26':prob[25],'p27':prob[26],'p28':prob[27],'p29':prob[28],'p30':prob[29],\
'p31':prob[30],'p32':prob[31],'p33':prob[32],'p34':prob[33],'p35':prob[34],\
'p36':prob[35],'p37':prob[36],'p38':prob[37],'p39':prob[38],'p40':prob[39],\
'p41':prob[40],'p42':prob[41],'p43':prob[42],'p44':prob[43],'p45':prob[44],\
'p46':prob[45],'p47':prob[46],'p48':prob[47],'p49':prob[48],'p50':prob[49],\
'p51':prob[50],'p52':prob[51],'p53':prob[52],'p54':prob[53],'p55':prob[54],\
'p56':prob[55],'p57':prob[56],'p58':prob[57],'p59':prob[58],'p60':prob[59],\
'p61':prob[60],'p62':prob[61],'p63':prob[62],'p64':prob[63],'dis':dis})
return out
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
            phylotree=np.append(np.append(np.append(np.append(np.append([0],np.repeat(3,16)),np.repeat(1.5,6)),[3.2,0.8]),np.repeat(2,3)),[1.5,1.5])
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
            phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),[1.5,0.75])
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
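    # Summary of the options above: MeH=1 is the abundance-based inverse Simpson index,
    # MeH=2 weights pattern co-occurrence by the distance matrix D (PWS), MeH=3 uses the
    # phylogeny-style weights built above, MeH=4 is Shannon entropy normalized by the
    # window width w, and MeH=5 is epipolymorphism (1 minus the sum of squared pattern frequencies).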
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
        return out, opt
else:
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out
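# impute() completes reads that miss exactly one CpG call inside a window: if every observed value
# at the missing position agrees, that value is copied; otherwise the value is borrowed from a
# randomly chosen fully observed read that matches the partial read at the other w-1 positions,
# falling back to a random observed value at that position. For example, a read seen as
# [1, nan, 0] in a w=3 window is completed from a complete read of the form [1, *, 0].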
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
return window
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
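# all_pos enumerates every binary methylation pattern of length w, one per row (row v is the
# binary expansion of v, least significant bit first); e.g. for w=2 the rows are
# [0,0], [1,0], [0,1], [1,1].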
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
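# aggreC/aggreR act as rolling read-by-position buffers: a column is appended for each new CG
# site, the first w windows are scored once 2w-1 CpG columns (plus Qname) have accumulated, a
# further w windows each time 3w-2 CpG columns (plus Qname) are reached, and the leftmost columns
# are then dropped so memory stays bounded while sliding along the chromosome.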
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['G'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['C'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate once 2w-1 CpG columns (plus the Qname column) have accumulated
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
#for i in range(0,meth.shape[1]-w+1,1):
#if i>w-2 and i<2*w:
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CG_ML_%s.csv"%(filename),index = False, header=True)
print("Done CG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
return sample, coverage, cov_context, 'CG'
#samfile.close()
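# A hypothetical call, assuming MeHdata/sample_0.bam and MeHdata/genome.fa exist:
#   sample, cov, cov_cg, ctx = CGgenome_scr('sample_0.bam', w=4, fa='genome',
#                                           optional=False, melv=True)
# returns the sample name, the number of pileup columns visited, the number of CG columns
# visited and the string 'CG'.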
def CHHgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
start=datetime.datetime.now()
MU=np.zeros((2,w))
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHH %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# forward
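# CHH on the forward strand: a reference C that is not followed by a G at either of the next
# two positions.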
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)!='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)!='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
tempr=tempr.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
#if enough_reads(window,w,complete=True):
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','G','A'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
# MeH eligibility
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='r',optional=optional)
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHH. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHH_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHH_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHH_opt_%s.csv"%(filename),index = False, header=True)
print("Done CHH for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
return sample, coverage, cov_context, 'CHH'
def CHGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
#directory = "Outputs/" + str(sample) + '.csv' #original filename of .bams
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
coverage = cov_context = 0
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','depth','strand'])
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
if w==6:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
#chr_lengths = fastafile.get_reference_length(chrom)
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
D=PattoDis(pd.DataFrame(all_pos),dist=dist) #1:Hamming distance
MU=np.zeros((2,w))
start=datetime.datetime.now()
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CHG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='C' and fastafile.fetch(chrom,pileupcolumn.pos+1,pileupcolumn.pos+2)!='G' and fastafile.fetch(chrom,pileupcolumn.pos+2,pileupcolumn.pos+3)=='G':
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
#df2.head()
temp=temp.append(df2, ignore_index=True)
#temp.head()
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['T'],0)
temp2 = temp2.replace(['A','G'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not temp.empty):
#temp.head()
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# reverse
if pileupcolumn.pos>2:
if fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+1)=='G' and fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos)!='C' and fastafile.fetch(chrom,pileupcolumn.pos-2,pileupcolumn.pos-1)=='C':
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # G
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2r = pd.DataFrame(data=dr)
#df2.head()
tempr=tempr.append(df2r, ignore_index=True)
#temp.head()
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['A'],0)
temp2 = temp2.replace(['C','T'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/float(MC+UC)}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
#temp.head()
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
if never and aggreC.shape[1] == (2*w):
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','G','N'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','C','T'],np.nan)
methbin = aggreR # backup
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(0,w,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
aggreR = aggreR.drop(meth.columns[0:1],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
#total += w
#------------------
# SECONDARY CASE
#------------------
if (aggreC.shape[1] == (3*w-1)):
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['N','A','G'],np.nan)
methbin = aggreC # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='f')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos))
aggreC = aggreC.drop(meth.columns[0:w],axis=1)
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#print(aggreC)
#total += w
# reverse
if (aggreR.shape[1] == (3*w-1)):
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['N','T','C'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
meth = methtemp.copy()
# compute coverage and output summary
for i in range(w-1,2*w-1,1):
window = meth.iloc[:,range(i,i+w)].values
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
Resultopt=Resultopt.append(opt)
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,optional=optional,strand='r')
ResultPW=ResultPW.append(toappend)
if ResultPW.shape[0] % 100000 == 1:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if not silence:
print("Checkpoint CHG. For file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
aggreR = aggreR.drop(meth.columns[0:w],axis=1)
aggreR.dropna(axis = 0, thresh=2, inplace = True)
if ResultPW.shape[0]>0:
ResultPW.to_csv(r"MeHdata/CHG_%s.csv"%(filename),index = False, header=True)
if melv:
ResML.to_csv(r"MeHdata/CHG_ML_%s.csv"%(filename),index = False, header=True)
if optional:
Resultopt.to_csv(r"MeHdata/CHG_opt_%s.csv"%(filename),index = False, header=True)
print("Done CHG for file %s: %s results obtained up to position chr %s: %s." % (filename,ResultPW.shape[0],chrom,pileupcolumn.pos+1))
return sample, coverage, cov_context, 'CHG'
def split_bam(samplenames,Folder):
# get bam size
spbam_list = []
bamfile = samplenames + '.bam'
statinfo_out = os.stat(Folder+bamfile)
bamsize = statinfo_out.st_size
samfile = pysam.Samfile(Folder+bamfile, "rb")
fileout_base = os.path.splitext(bamfile)[0] # filename
ext = '.bam'
x = 0
fileout = Folder+fileout_base+"_" + str(x)+ext # filename_x.bam
print("fileout",fileout)
header = samfile.header
outfile = pysam.Samfile(fileout, "wb", header = header)
sum_Outfile_Size=0
for reads in samfile.fetch():
outfile.write(reads)
statinfo_out = os.stat(fileout)
outfile_Size = statinfo_out.st_size
if(outfile_Size >=337374182 and sum_Outfile_Size <= bamsize):
sum_Outfile_Size = sum_Outfile_Size + outfile_Size
x = x + 1
spbam_list.append(fileout_base + "_" + str(x)+ext)
outfile.close()
pysam.index(fileout)
fileout = Folder+fileout_base + "_" + str(x)+ext
print("fileout",fileout)
outfile = pysam.Samfile(fileout, "wb",header = header)
outfile.close()
pysam.index(fileout)
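# A hypothetical call, assuming MeHdata/sample.bam exists:
#   split_bam('sample', Folder='MeHdata/')
# writes and indexes sample_0.bam, sample_1.bam, ... in pieces of roughly 337 MB so that each
# piece can be screened by a separate worker below.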
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--windowsize",type=int, default=4 ,help='number of CGs')
parser.add_argument("-c", "--cores",type=int, default=4, help='number of cores')
parser.add_argument("-m", "--MeH",type=int, default=2, help='Methylation heterogeneity score 1:Abundance 2:PW 3:Phylogeny')
parser.add_argument("-d", "--dist",type=int, default=1, help='Distance between methylation patterns 1:Hamming 2:WDK')
parser.add_argument("--CG", default=False, action='store_true', help='Include genomic context CG')
parser.add_argument("--CHG", default=False, action='store_true', help='Include genomic context CHG')
parser.add_argument("--CHH", default=False, action='store_true', help='Include genomic context CHH')
parser.add_argument("--opt", default=False, action='store_true', help='Outputs compositions of methylation patterns')
parser.add_argument('--mlv', default=False, action='store_true', help='Outputs methylation levels')
parser.add_argument('--imp', default=True, action='store_false', help='Disable BSImp imputation of partially observed windows (imputation is on by default)')
args = parser.parse_args()
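# An illustrative command line (the script name is a placeholder):
#   python MeHscr.py -w 4 -c 8 --CG --mlv
# scores 4-CpG windows in the CG context on 8 cores and also writes per-bin methylation levels.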
import sys
import time
import os
import pandas as pd
import multiprocessing
from joblib import Parallel, delayed
#num_cores = multiprocessing.cpu_count()
if __name__ == "__main__":
open_log('MeHscreening.log')
logm("Call genome screening.")
#start = time.time()
Folder = 'MeHdata/'
files = os.listdir(Folder)
bam_list = []
# all samples' bam files
for file in files:
filename, file_extension = os.path.splitext(file)
if file_extension == '.fa':
fa = filename
if file_extension == '.bam':
bam_list.append(filename)
#if 'cores' in args:
# num_cores = args.cores
#else:
# num_cores = 4
Parallel(n_jobs=args.cores)(delayed(split_bam)(bamfile,Folder=Folder) for bamfile in bam_list)
spbam_list = []
tempfiles = os.listdir(Folder)
for file in tempfiles:
filename, file_extension = os.path.splitext(file)
if file_extension=='.bam' and filename not in bam_list:
spbam_list.append(filename)
#print(spbam_list)
topp = pd.DataFrame(columns=['sample','coverage','context_coverage','context'])
#CG = []
#start=t.time()
if args.CG:
con='CG'
CG=Parallel(n_jobs=args.cores)(delayed(CGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CG.")
# merge MeH within sample
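# Per-window MeH values are collapsed into fixed 400 bp bins labelled by their midpoint:
# ((pos-1)//400)*400+200 maps positions 1-400 to bin 200 and 401-800 to bin 600.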
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
#Toappend=Toappend.dropna(axis = 0, thresh=4, inplace = True)
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False, header = True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend.to_csv(res_dir,index = False,header=True)
#os.chdir('../')
#os.chdir(outputFolder)
logm("Merging ML within samples for CG.")
# append ML within samples
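# Methylation levels are binned the same way, but a bin's ML is kept only when it is supported
# by more than 4 positions (counts > 4); sparser bins are set to NaN before averaging.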
if args.mlv:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
res_dir = Folder + con + '_ML_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_ML_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
#os.remove(toapp_dir)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Count = Toappend.groupby(['chrom','bin','strand']).size().reset_index(name='counts')
Toappend=Toappend.merge(Count, on=['chrom','bin','strand'])
#print(Toappend)
conditions = [
(Toappend['counts'] > 4),
(Toappend['counts'] < 5)
]
# create a list of the values we want to assign for each condition
values = [Toappend['ML'], np.nan]
# create a new column and use np.select to assign values to it using our lists as arguments
Toappend['ML'] = np.select(conditions, values)
Toappend=Toappend.drop(columns=['counts','pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'ML': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
logm("Merging ML between samples for CG.")
# merge ML between samples
if args.mlv:
for sample in bam_list:
tomerge_dir = Folder + con + '_ML_' + str(sample) + '.csv'
res_dir = Folder + con + '_ML_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'ML': sample})
Result=Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result = Result.rename(columns={'ML': sample})
#Result = Result.drop(columns=['counts','pos','depth','dis'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(res_dir,index = False,header=True)
os.remove(tomerge_dir)
logm("Merging MeH between samples for CG.")
# merge MeH between samples
for sample in bam_list:
tomerge_dir = Folder + con + '_' + str(sample) + '.csv'
res_dir = Folder + con + '_' + 'Results.csv'
if os.path.exists(res_dir):
Result = pd.read_csv(res_dir)
Tomerge = pd.read_csv(tomerge_dir)
Tomerge.dropna(axis = 0, thresh=4, inplace = True)
Tomerge = Tomerge.rename(columns={'MeH': sample})
Result = Result.merge(Tomerge, on=['chrom','bin','strand'])
Result.dropna(axis = 0, thresh=4, inplace = True)
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
else:
Result = pd.read_csv(tomerge_dir)
Result.head()
Result.dropna(axis = 0, thresh=4, inplace = True)
Result = Result.rename(columns={'MeH': sample})
Result.to_csv(Folder + con + '_' +'Results.csv',index = False,header=True)
os.remove(tomerge_dir)
Result.to_csv(Folder + con + '_' +'Results.csv' ,index = False,header=True)
print("All done.",len(bam_list),"bam files processed and merged for CG.")
logm("All done. "+str(len(bam_list))+" bam files processed and merged for CG.")
for i in CG:
toout=pd.DataFrame({'sample':i[0],'coverage':i[1],'context_coverage':i[2],'context':i[3]},index=[0])
topp=topp.append(toout)
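# topp accumulates one row per split bam with its total and per-context coverage; the same
# merging pattern is repeated below for the CHG context.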
if args.CHG:
con='CHG'
CG=Parallel(n_jobs=args.cores)(delayed(CHGgenome_scr)(bamfile,w=args.windowsize,fa=fa,MeH=args.MeH,dist=args.dist,optional=args.opt,melv=args.mlv,imp=args.imp) for bamfile in spbam_list)
logm("Merging MeH within samples for CHG.")
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
print("Merging within sample",sample,"...")
if not sample == filename:
res_dir = Folder + con + '_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_' + file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Tomod = Tomod.append(Toappend)
Tomod.to_csv(res_dir,index = False,header=True)
else:
Toappend = pd.read_csv(toapp_dir)
Toappend['bin'] = [((x-1)//400)*400+200 for x in Toappend['pos']]
Toappend=Toappend.drop(columns=['pos'])
Toappend=Toappend.groupby(['chrom','bin','strand']).agg({'MeH': 'mean'}).reset_index()
Toappend.to_csv(res_dir,index = False,header=True)
# not into bins of 400bp
if args.opt:
for file in spbam_list:
filename, file_extension = os.path.splitext(file)
sample = str.split(file,'_')[0]
#print("sample = ",sample)
if not sample == filename:
res_dir = Folder + con + '_opt_' + str(sample) + '.csv'
toapp_dir = Folder + con + '_opt_' +file + '.csv'
if os.path.exists(res_dir):
Tomod = pd.read_csv(res_dir)
Toappend =
|
pd.read_csv(toapp_dir)
|
pandas.read_csv
|
import urllib
import pytest
import pandas as pd
from pandas import testing as pdt
from anonympy import __version__
from anonympy.pandas import dfAnonymizer
from anonympy.pandas.utils_pandas import load_dataset
@pytest.fixture(scope="module")
def anonym_small():
df = load_dataset('small')
anonym = dfAnonymizer(df)
return anonym
@pytest.fixture(scope="module")
def anonym_big():
try:
df = load_dataset('big')
anonym = dfAnonymizer(df)
except urllib.error.HTTPError:
anonym = None
return anonym
def test_anonym_obj(anonym_small, anonym_big):
assert isinstance(anonym_small, dfAnonymizer), "should have\
returned `dfAnonymizer` object"
if anonym_big is None:
assert False, "Failed to fetch the DataFrame"
assert isinstance(anonym_big, dfAnonymizer), "should have returned\
`dfAnonymizer` object"
def test_numeric_noise(anonym_small):
output = anonym_small.numeric_noise('age', seed=42, inplace=False)
expected = pd.Series([38, 47], dtype='int64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_noise(['age', 'salary'],
seed=42,
inplace=False)
expected = pd.DataFrame({'age': [38, 47],
'salary': [59239.79912097112, 49323.30756879504]})
pdt.assert_frame_equal(expected, output)
def test_numeric_binning(anonym_small):
output = anonym_small.numeric_binning('salary', bins=2, inplace=False)
dtype = pd.CategoricalDtype([
pd.Interval(49315.0, 54279.0, closed='right'),
pd.Interval(54279.0, 59234.0, closed='right')],
ordered=True)
expected = pd.Series([
pd.Interval(54279.0, 59234.0, closed='right'),
pd.Interval(49315.0, 54279.0, closed='right')],
dtype=dtype)
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_binning(['age', 'salary'],
bins=2,
inplace=False)
dtype2 = pd.CategoricalDtype([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
ordered=True)
ser2 = pd.Series([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
dtype=dtype2)
expected = pd.DataFrame({'age': ser2, 'salary': expected})
pdt.assert_frame_equal(expected, output)
def test_numeric_masking(anonym_small):
output = anonym_small.numeric_masking('age', inplace=False)
expected = pd.Series([7.5, -7.5], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_masking(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': [-4954.900676201789, 4954.900676201798],
'salary': [5.840670901327418e-15,
5.840670901327409e-15]})
pdt.assert_frame_equal(expected, output)
def test_numeric_rounding(anonym_small):
output = anonym_small.numeric_rounding('salary', inplace=False)
expected = pd.Series([60000.0, 50000.0], dtype='float64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_rounding(['age', 'salary'], inplace=False)
expected = pd.DataFrame({'age': {0: 30, 1: 50}, 'salary': {0: 60000.0,
1: 50000.0}})
pdt.assert_frame_equal(expected, output)
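# The expected names and e-mail addresses below appear as redacted placeholders
# ('<NAME>', '<EMAIL>') rather than literal Faker output.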
@pytest.mark.skipif(__version__ == '0.2.4',
reason="Requires anonympy >= 0.2.5")
def test_categorical_fake(anonym_small):
output = anonym_small.categorical_fake('name',
locale=['en_US'],
seed=42,
inplace=False)
expected = pd.Series(['<NAME>', '<NAME>'])
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.categorical_fake(['name', 'email'],
locale=['en_GB'],
seed=42,
inplace=False)
expected = pd.DataFrame({'name': {0: '<NAME>', 1: '<NAME>'},
'email': {0: '<EMAIL>',
1: '<EMAIL>'}})
|
pdt.assert_frame_equal(expected, output)
|
pandas.testing.assert_frame_equal
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
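# 25 matches the label 25.0 in the object-dtype index, so get() returns the stored value (43)
# instead of the default.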
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
self.assertEqual(aa.name, 'ts')
self.assertEqual(ea.name, 'ts')
self.assertEqual(ab.name, 'ts')
self.assertEqual(eb.name, 'ts')
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index,
identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in
|
compat.iteritems(subNonContig)
|
pandas.compat.iteritems
|
import multiprocessing as mp
import os
from argparse import ArgumentParser
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from pandas import DataFrame
from statsmodels.tsa.vector_ar.var_model import VARResultsWrapper
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset import EnergyDataset
global model
def do(x, counter):
x = x.squeeze(0).cpu().numpy()
preds = model.forecast(x, steps=168)[:, 1:]
return counter, preds
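# Minimal usage sketch for `do` (not from the original script): the batch shape,
# the feature count, and the torch import are assumptions; `model` must already be
# loaded as in the __main__ block below.
def _example_single_forecast():
    import torch
    dummy_batch = torch.zeros(1, 168, 8)  # hypothetical (batch, lags, features) window
    counter, preds = do(dummy_batch, 0)
    return counter, preds.shape  # 168 forecast steps, first column dropped by do()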
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--weights_path", type=str, default=".", help="Model weights path")
parser.add_argument("--save_dir", type=str, help="Directory where weights and results are saved", default=".")
parser.add_argument(
"--data_dir",
type=str,
help="Directory containing the data you want to predict",
default="/hkfs/work/workspace/scratch/bh6321-energy_challenge/data",
)
args = parser.parse_args()
save_dir = args.save_dir
data_dir = args.data_dir
# load model with pretrained weights
weights_path = os.path.join(args.weights_path, "var_model.weights")
if not os.path.exists(weights_path):
raise FileNotFoundError(f"The file for the trained model does not exist: {weights_path}")
model = VARResultsWrapper.load(weights_path)
# dataloader
test_file = os.path.join(data_dir, "test.csv")
valid_file = os.path.join(data_dir, "valid.csv")
data_file = test_file if os.path.exists(test_file) else valid_file
df =
|
pd.read_csv(data_file)
|
pandas.read_csv
|
"""
This module contains support functions and libraries used in views and tasks.
"""
import json
import logging
import os
import shutil
from datetime import datetime
from itertools import permutations
import yaml
import pandas as pd
import numpy as np
from django.db.models import Func, Value
from django.utils.timezone import make_aware
import pint
CALLIOPE = 18
logging.addLevelName(CALLIOPE, "CALLIOPE")
logger = logging.getLogger("calliope")
def get_model_logger(log_file):
"""
Get logger for logging model run info or errors
:param log_file: path to the file that log messages are written to
:return: logger
"""
logging.basicConfig(level=logging.INFO,
format="[%(asctime)s] %(levelname)s: %(message)s")
if logger.handlers:
logger.removeHandler(logger.handlers[0])
file_handler = logging.FileHandler(log_file)
formatter = logging.Formatter(
"[%(asctime)s] %(levelname)s: %(message)s<br>",
datefmt="%Y-%m-%d %H:%M:%S")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(CALLIOPE)
return logger
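# Minimal usage sketch (illustration only; the log file name is hypothetical).
def _example_get_model_logger():
    log = get_model_logger("model_run.log")
    log.log(CALLIOPE, "Model run started")  # written to the file at the custom CALLIOPE level
    return log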
def dateformats():
"""
Yield common combinations of valid datetime formats.
"""
years = ("%Y", "%y",)
months = ("%m", "%b", "%B",)
days = ("%d",)
times = (
"%H:%M", "%H:%M:%S", "%I:%M%p", "%I:%M:%S%p",
"%I:%M %p", "%I:%M:%S %p", "%I%p")
for year in years:
for month in months:
for day in days:
date_orders = ((year, month, day), (month, day, year),
(day, month, year), (month, day), (day, month))
for args in date_orders:
for sep in (" ", "/", "-"):
date = sep.join(args)
for time in times:
for combo in permutations([date, time]):
yield " ".join(combo).strip()
def get_date_format(string):
"""
Detect datetime.strptime format or None from a string.
from http://code.activestate.com/recipes/578245-flexible-datetime-parsing/
"""
for fmt in dateformats():
try:
datetime.strptime(string, fmt)
return fmt
except ValueError:
pass
return None
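# Usage sketch (illustration only; the sample timestamp is made up): returns the
# first matching strptime pattern, or None when nothing matches.
def _example_get_date_format():
    return get_date_format("2019/01/31 13:45")  # e.g. "%Y/%m/%d %H:%M"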
def zip_folder(path, keep_folder=True):
"""
Zip contents of folder
Parameters
----------
path : str
folder path to be zipped
"""
zip_file = path + ".zip"
if not os.path.exists(zip_file):
root_dir = os.path.dirname(path)
base_dir = "./" + os.path.basename(path)
shutil.make_archive(path, "zip", root_dir, base_dir=base_dir)
if not keep_folder:
shutil.rmtree(path)
return zip_file
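# Usage sketch (illustration only; the folder path is hypothetical): creates the
# archive next to the folder and deletes the original when keep_folder=False.
def _example_zip_folder():
    return zip_folder("/tmp/example_outputs", keep_folder=False)  # "/tmp/example_outputs.zip"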
def dictfetchall(cursor):
"""
Return all rows from a cursor as a dict
"""
columns = [col[0] for col in cursor.description]
return [dict(zip(columns, row)) for row in cursor.fetchall()]
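# Usage sketch (illustration only; assumes a configured Django connection and a
# hypothetical "run" table): each fetched row comes back as a column-name -> value dict.
def _example_dictfetchall():
    from django.db import connection
    with connection.cursor() as cursor:
        cursor.execute("SELECT id, name FROM run")
        return dictfetchall(cursor)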
def list_to_yaml(table_list, filename):
"""
Convert a list of strings to YAML file format
"""
X = [x[0].split('||') for x in table_list]
timeseries = [x[1] for x in table_list]
d = {}
for index in range(len(X)):
path = X[index]
current_level = d
for i, part in enumerate(path):
if i < (len(path) - 1):
if i == (len(path) - 2):
if timeseries[index] is True:
parameter = path[i]
technology = path[i - 2]
if (i - 4 > 0):
location = path[i - 4]
current_level[part] = \
"file={}--{}--{}.csv:value".format(
location, technology, parameter)
else:
current_level[part] = \
"file={}--{}.csv:value".format(
technology, parameter)
elif path[i + 1] == "":
current_level[part] = None
elif path[i + 1] == 'True':
current_level[part] = True
elif path[i + 1] == 'False':
current_level[part] = False
else:
try:
string = path[i + 1].replace(", ", ",")
for char in ['\'', '“', '”', '‘', '’']:
string = string.replace(char, '\"')
current_level[part] = json.loads(string)
except Exception:
try:
current_level[part] = float(path[i + 1])
except ValueError:
current_level[part] = path[i + 1]
if part not in current_level:
current_level[part] = {}
current_level = current_level[part]
with open(filename, 'w') as outfile:
yaml.dump(d, outfile, default_flow_style=False)
return True
def get_cols_from_csv(filename):
"""
Read the columns from a csv
"""
df = pd.read_csv(filename)
return df.columns
def load_timeseries_from_csv(filename, t_index, v_index, has_header=False):
"""
Build a timeseries as a pandas dataframe from a csv file
"""
t_index = int(t_index)
v_index = int(v_index)
date_format = None # just use pd.to_datetime if None
df = pd.DataFrame()
# Cache date format for large files (sample 20th row):
if filename.size > 2e6: # 2 MB
read_df = pd.read_csv(filename.path, header=None, skiprows=20,
usecols=[t_index, v_index],
skipinitialspace=True, nrows=1)
test_date = read_df.iloc[0, t_index]
date_format = get_date_format(test_date)
# Load Full Data
skiprows = 0
if has_header:
skiprows = 1
read_df = pd.read_csv(filename.path, header=None, skiprows=skiprows,
usecols=[t_index, v_index],
skipinitialspace=True, chunksize=100000)
# Convert to Timestamps
for chunk_df in read_df:
if date_format is None:
# Slow: We didn't find the strptime format:
chunk_dates = pd.to_datetime(chunk_df.loc[:, t_index])
else:
# Fast: Try using strptime
try:
chunk_dates = chunk_df[[t_index]].astype(str).iloc[:, 0].apply(
lambda x: make_aware(datetime.strptime(x, date_format)))
except (ValueError, AttributeError):
chunk_dates = pd.to_datetime(chunk_df.loc[:, t_index])
chunk_df.iloc[:, t_index] = chunk_dates
# Filter out NaN values
chunk_df = chunk_df[pd.notnull(chunk_df)]
chunk_df = chunk_df[
|
pd.notnull(chunk_df)
|
pandas.notnull
|
#dependencies
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import confusion_matrix
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import normalize
import itertools
import matplotlib.pyplot as plt
import pandas as pd
#function definition to plot the confusion matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
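# usage sketch for the helper above (illustration only; labels and predictions are made up)
def _example_plot_confusion_matrix():
    y_true = [0, 0, 1, 1]
    y_pred = [0, 1, 1, 1]
    cm = confusion_matrix(y_true, y_pred)
    plot_confusion_matrix(cm, classes=[0, 1], normalize=True)
    plt.show()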
train_data = pd.read_csv('criminal_train.csv')
y_train = train_data['Criminal']
X_train = train_data.drop(['PERID','Criminal'],axis = 1).values
X_train = normalize(X_train, axis = 0)
test_data = pd.read_csv('criminal_test.csv')
X_test = test_data.drop(['PERID'],axis = 1).values
X_test = normalize(X_test, axis = 0)
#model structure
model = VotingClassifier(
estimators=[
( 'gb',GradientBoostingClassifier(n_estimators=500,verbose =1,max_depth = 6 )),
('rf', RandomForestClassifier(n_estimators=1000, verbose = 1))],
voting='soft')
model = AdaBoostClassifier(base_estimator= model, n_estimators =10 )
#training the model
print('training the model: ')
model.fit(X_train, y_train)
print('model trained: ')
model.score(X_train,y_train)
X_train, X_test, y_train, y_test = train_test_split(X_train ,y_train, train_size = .9)
model.fit(X_train, y_train)
model.score(X_test, y_test)
#####################################################
#predicting values on test file
df = pd.read_csv('criminal_test.csv')
predicted = model.predict(X_test)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('output_old.csv', index = False)
###############################################################
print(model.score(X_train, y_train))
# predicted value
predicted_y = model.predict(X_train)
#plot the confusion matrix
cnf_matrix = confusion_matrix(y_train, predicted_y)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=[0,1],
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=[0,1], normalize=True,
title='Normalized confusion matrix')
plt.show()
################################################
#input file
df = pd.read_csv('criminal_train.csv')
# KNN classifier
model_knn =KNeighborsClassifier(n_neighbors= 5, weights='distance', n_jobs = 4)
model_knn.fit(X_train, y_train)
model_knn.score(X_train,y_train)
predicted = model_knn.predict(X_train)
df = pd.read_csv('criminal_train.csv')
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_knn.csv', index = False)
## random forest classifier
model_rf =RandomForestClassifier(n_estimators=1000, verbose = 1)
model_rf.fit(X_train, y_train)
df = pd.read_csv('criminal_train.csv')
predicted = model_rf.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_rf.csv', index = False)
# ada boosting classifier
model_ab = AdaBoostClassifier(n_estimators=500)
model_ab.fit(X_train, y_train)
df = pd.read_csv('criminal_train.csv')
X_test = df.drop(['PERID'],axis =1).values
predicted = model_ab.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_ab.csv', index = False)
### gradient boosting classifier
model_gb = GradientBoostingClassifier(n_estimators=500,verbose =1,max_depth = 6 )
model_gb.fit(X_train, y_train)
df = pd.read_csv('criminal_train.csv')
X_test = df.drop(['PERID'],axis =1).values
predicted = model_gb.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_gb.csv', index = False)
#logistic regression
model_lr =LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=.3)
model_lr.fit(X_train, y_train)
predicted = model_lr.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_lr.csv', index = False)
## support vector machines
model_svm =svm.SVC(C=.75, verbose = True)
model_svm.fit(X_train, y_train)
predicted = model_svm.predict(X_train)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('input_svm.csv', index = False)
##############################################
### output file
test_data = pd.read_csv('criminal_test.csv')
X_test = test_data.drop(['PERID'],axis = 1).values
X_test = normalize(X_test, axis = 0)
# KNN classifier
predicted = model_knn.predict(X_test)
df = pd.read_csv('criminal_test.csv')
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('output_knn.csv', index = False)
## random forest classifier
predicted = model_rf.predict(X_test)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('output_rf.csv', index = False)
# ada boosting classifier
predicted = model_ab.predict(X_test)
frame = pd.DataFrame()
frame['PERID'] = df['PERID']
frame['Criminal'] = predicted
frame.to_csv('output_ab.csv', index = False)
### gradient boosting classifier
predicted = model_gb.predict(X_test)
frame =
|
pd.DataFrame()
|
pandas.DataFrame
|
####
#
# The MIT License (MIT)
#
# Copyright 2021, 2022 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import logging
from matplotlib import cm
from copy import deepcopy
from typing import List, Optional, Union
from scipy.stats import wilcoxon, ttest_rel
# ================
# Setup the Logger
LOGGER = logging.getLogger("plotting_utils")
LOGGER.setLevel(logging.INFO)
LOGGER.propagate = False
CH = logging.StreamHandler()
CH.setLevel(logging.INFO)
FORMATTER = logging.Formatter('[%(levelname)s] %(name)s : %(message)s')
CH.setFormatter(FORMATTER)
LOGGER.addHandler(CH)
# ================
def _get_topk(x, k, method):
"""
Task: Pandas aggregation function to compute the top-k acc.
"""
out = 0.0
if method == "average":
for xi in x:
out += (np.mean(xi) <= k)
elif method == "csi":
for xi in x:
y = np.arange(xi[0], xi[1] + 1)
for yi in y:
if yi <= k:
out += (1.0 / len(y))
else:
raise ValueError("Invalid method: '%s'" % method)
# Get accuracy as percentages
out /= len(x)
out *= 100
return out
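# Tiny usage sketch (illustration only; the rank pairs are made up). With
# method="csi" each element is a (min_rank, max_rank) tie interval; the example
# below evaluates to roughly 66.7, i.e. two of the three entries count as top-5 hits.
def _example_get_topk():
    ranks = [(1, 1), (2, 4), (30, 30)]
    return _get_topk(ranks, k=5, method="csi")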
def plot__02__b(
results: pd.DataFrame, ks: Optional[Union[List[int], int]] = None, min_class_support: int = 50,
sharey: str = "all", n_samples: int = 50, topk_method: str = "csi", fig_width: int = 6, fig_height: int = 4,
label_rot_angle: float = 90
):
"""
Bar plots indicating the top-k improvements per class in the PubChemLite classification based on PubChem's TOC.
:param results: pd.DataFrame, containing the Only-MS2 and LC-MS2Struct ranks, PubChemLite classes, and further
information for the spectra in our experiments. Please check "gather_ranl_changes__csi.py" for the details on
the data-structure. The results for different MS2-scorers are concatenated.
:param ks: scalar or list of scalars, k for which the top-k ranking performance improvements should be analysed.
:param min_class_support: scalar, minimum number of unique molecular structures per PubChemLite class.
:param sharey: string or boolean, indicating whether (and how) the y-axes ranges are synchronized.
:param n_samples: scalar, number of random samples to estimate the top-k accuracy from.
:param topk_method: deprecated
:param fig_width: scalar, width of the figure
:param fig_height: scalar, height of the figure
:param label_rot_angle: scalar, rotation angle of the x-labels
:return:
"""
assert topk_method == "csi", "Only CSI:FingerID top-k accuracy computation is supported."
# Make a deep copy of the input data, e.g. to allow modifications
results = deepcopy(results)
# Get relevant subset
pl_columns = [s for s in results.columns.tolist() if s.startswith("pubchemlite")]
info_columns = [
"correct_structure", "molecule_identifier", "rank_onlyms", "rank_msplrt", "n_cand", "n_isomers", "ms2scorer"
]
results = results \
.filter(items=pl_columns + info_columns, axis=1) \
.rename(mapper={c: c.split("_")[1] for c in pl_columns}, axis=1)
# --- Columns in the subplot ---
# k for the top-k that are plotted as columns in the subplots
if ks is None:
ks = [1, 20]
elif isinstance(ks, int):
ks = [ks]
else:
assert isinstance(ks, list)
n_k = len(ks)
# --- Rows in the subplot correspond to the MS2 scoring methods in the input data ---
if "ms2scorer" not in results.columns:
results = results.assign(ms2scorer="MS$^2$ Scorer")
l_ms2scorer = [ms2scorer for ms2scorer, _ in results.groupby("ms2scorer")]
d_row2scorer = {s: i for i, s in enumerate(l_ms2scorer)}
n_scorer = len(l_ms2scorer)
# Create the Axes-array for plotting
fig, axrr = plt.subplots(
n_scorer, n_k, figsize=(fig_width * n_k, fig_height * n_scorer), squeeze=False, sharey=sharey, sharex="all"
)
# Plot
results_out = []
for ms2scorer, res_sub in results.groupby("ms2scorer"):
for ax_col_idx, k in enumerate(ks):
# Get the axis to draw in
ax = axrr[d_row2scorer[ms2scorer], ax_col_idx]
_res_sub = []
for rep in range(n_samples):
_res = res_sub \
.sample(frac=1, random_state=rep) \
.drop_duplicates("correct_structure") \
.melt(info_columns, var_name="pubchemlite_class", value_name="membership_count")
# We can drop the rows where a molecule is not a member of a particular class
_res = _res[_res["membership_count"] > 0] # type: pd.DataFrame
# Compute the top-k accuracies for Only MS and MS + RT
_res = _res \
.groupby("pubchemlite_class") \
.agg({
"rank_onlyms": lambda x: _get_topk(x, k, topk_method),
"rank_msplrt": lambda x: _get_topk(x, k, topk_method),
"n_cand": np.median,
"n_isomers": np.median,
"molecule_identifier": len
}) \
.rename({
"rank_onlyms": "top_k_p_onlyms",
"rank_msplrt": "top_k_p_msplrt",
"molecule_identifier": "n_class_support"
}, axis=1) \
.reset_index()
_res_sub.append(_res)
_res_sub = pd.concat(_res_sub, ignore_index=True)
# Add the top-k improvement in percentage-points
_res_sub = _res_sub.assign(top_k_p_improvement=(_res_sub["top_k_p_msplrt"] - _res_sub["top_k_p_onlyms"]))
# Filter classes without enough support
_res_sub = _res_sub[_res_sub["n_class_support"] >= min_class_support]
if len(_res_sub) == 0:
raise ValueError("No class has enough support.")
sns.barplot(
data=_res_sub, x="pubchemlite_class", y="top_k_p_improvement", ax=ax
)
ax.grid(axis="y")
ax.hlines(0, ax.get_xlim()[0] - 1, ax.get_xlim()[1] + 1, color='k', linestyle="--")
ax.set_title("%s - top-%d" % (ms2scorer, k), fontweight="bold")
ax.bar_label(
ax.containers[0],
labels=[
"%.1f" % _l
for _l in _res_sub.groupby("pubchemlite_class")["top_k_p_onlyms"].mean().tolist()
],
rotation=90, horizontalalignment="center", fmt="%.1f", label_type="edge", padding=10, fontsize=12
)
if d_row2scorer[ms2scorer] == (n_scorer - 1):
ax.set_xticklabels(
[
plt.Text(
_tl.get_position()[0], _tl.get_position()[1],
"%s (n=%d)" %
(
_tl.get_text(),
_res_sub[_res_sub["pubchemlite_class"] == _tl.get_text()]["n_class_support"].iloc[0]
)
)
for _tl in ax.get_xticklabels()
],
rotation=label_rot_angle, horizontalalignment="center", fontsize=12
)
ax.set_xlabel("PubChemLite classification", fontsize=12)
else:
ax.set_xlabel("")
if ax_col_idx == 0:
ax.set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
else:
ax.set_ylabel("")
results_out.append(
_res_sub
.groupby("pubchemlite_class")
.agg({
"top_k_p_onlyms": np.mean,
"top_k_p_msplrt": np.mean,
"top_k_p_improvement": np.mean,
"n_cand": lambda x: x.iloc[0],
"n_isomers": lambda x: x.iloc[0],
"n_class_support": lambda x: x.iloc[0],
})
.assign(k=k, ms2scorer=ms2scorer)
.reset_index()
)
# Compute the average improvement into actual counts
results_out[-1]["improvement_in_n"] = \
(results_out[-1]["n_class_support"] * results_out[-1]["top_k_p_improvement"]) / 100
# Adjust y-axis range to provide enough space for the labels
_y_add = {1: 1.0, 5: 0.5, 20: 1.75}
for ax_col_idx, _k in enumerate(ks):
for ax in axrr[:, ax_col_idx]:
_y_min, _y_max = ax.get_ylim()
ax.set_ylim(_y_min - _y_add.get(_k, 0.0), _y_max)
plt.tight_layout()
return pd.concat(results_out, ignore_index=True)
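# Hypothetical usage sketch (file name and column layout are assumptions, not taken
# from the original scripts):
#
#   results = pd.read_csv("rank_changes__pubchemlite.csv")   # hypothetical input file
#   summary = plot__02__b(results, ks=[1, 20], min_class_support=50)
#   plt.savefig("topk_improvement__pubchemlite.pdf")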
# Same color-map as used in the supplementary material when plotting the classyfire class distribution
MY_CLASSYFIRE_CLASSES_COLORMAP = {
'Alkaloids and derivatives': (0.12156862745098039, 0.4666666666666667, 0.7058823529411765, 1.0),
'Benzenoids': (0.6823529411764706, 0.7803921568627451, 0.9098039215686274, 1.0),
'Lignans, neolignans and related compounds': (0.7686274509803922, 0.611764705882353, 0.5803921568627451, 1.0),
'Lipids and lipid-like molecules': (1.0, 0.4980392156862745, 0.054901960784313725, 1.0),
'Nucleosides, nucleotides, and analogues': (0.5490196078431373, 0.33725490196078434, 0.29411764705882354, 1.0),
'Organic acids and derivatives': (1.0, 0.7333333333333333, 0.47058823529411764, 1.0),
'Organic nitrogen compounds': (0.7725490196078432, 0.6901960784313725, 0.8352941176470589, 1.0),
'Organic oxygen compounds': (1.0, 0.596078431372549, 0.5882352941176471, 1.0),
'Organohalogen compounds': (0.5803921568627451, 0.403921568627451, 0.7411764705882353, 1.0),
'Organoheterocyclic compounds': (0.17254901960784313, 0.6274509803921569, 0.17254901960784313, 1.0),
'Other': (0.586082276047674, 0.586082276047674, 0.586082276047674, 1.0),
'Phenylpropanoids and polyketides': (0.8392156862745098, 0.15294117647058825, 0.1568627450980392, 1.0)
}
def plot__02__a(
results: pd.DataFrame, ks: Optional[Union[List[int], int]] = None, min_class_support: int = 50,
colormap_name: str = "fixed", sharey: str = "all", cf_level: str = "superclass", n_samples: int = 50,
topk_method: str = "csi", fig_width: int = 6, fig_height: int = 4, label_rot_angle: float = 90
):
"""
Bar plots indicating the top-k improvements per ClassyFire compound class.
:param results: pd.DataFrame, containing the Only-MS2 and LC-MS2Struct ranks, ClassyFire classes, and further
information for the spectra in our experiments. Please check, "gather_ranl_changes__csi.py" for the details on
the data-structure. The results for different MS2-scorers are concatenated.
:param ks: scalar or list of scalars, k for which the top-k ranking performance improvements should be analysed.
:param min_class_support: scalar, minimum number of unique molecular structures per ClassyFire class.
:param colormap_name: string, either the name of a matplotlib color-map, or "fixed". If "fixed" than pre-defined
colors are used for the ClassyFire super-classes.
:param sharey: string or boolean, indicating whether (and how) the y-axes ranges are synchronized.
:param cf_level: string, Classyfire level to analyse.
:param n_samples: scalar, number of random samples to estimate the top-k accuracy from.
:param topk_method: deprecated
:param fig_width: scalar, width of the figure
:param fig_height: scalar, height of the figure
:param label_rot_angle: scalar, rotation angle of the x-labels
:return:
"""
def _aggregate_and_filter_classyfire_classes(df, min_class_support, cf_level):
"""
Task: Group and aggregate the results by the ClassyFire class-level and determine the support for each class.
Then, remove all classes with too little support. Purpose is to get the "relevant" class and superclass
relationships to determine the colors and orders for the plotting.
"""
# We consider only unique molecular structures to compute the CF class support
tmp = df.drop_duplicates("correct_structure")
# Group by the ClassyFire level
tmp = tmp.groupby("classyfire_%s" % cf_level)
if cf_level == "class":
tmp = tmp.aggregate({
"molecule_identifier": lambda x: len(x),
"classyfire_superclass": lambda x: x.iloc[0]
})
elif cf_level == "superclass":
tmp = tmp.aggregate({
"molecule_identifier": lambda x: len(x),
"classyfire_class": lambda x: ",".join([xi for xi in x if not pd.isna(xi)])
})
else:
raise ValueError("Invalid ClassyFire level: '%s'" % cf_level)
tmp = tmp \
.rename({"molecule_identifier": "n_class_support"}, axis=1) \
.reset_index() \
.sort_values(by="classyfire_superclass")
return tmp[tmp["n_class_support"] >= min_class_support]
assert cf_level in ["superclass", "class"], "Invalid or unsupported ClassyFire class level: '%s'." % cf_level
assert topk_method == "csi", "Only CSI:FingerID top-k accuracy computation is supported."
# Make a deep copy of the input data, e.g. to allow modifications
results = deepcopy(results)
# Drop the rows for which the desired ClassyFire class has no value (NaN), e.g. some examples might not have a
# 'class'-level annotation.
results = results.dropna(subset=["classyfire_%s" % cf_level])
# --- Columns in the subplot ---
# k for the top-k that are plotted as columns in the subplots
if ks is None:
ks = [1, 20]
elif isinstance(ks, int):
ks = [ks]
else:
assert isinstance(ks, list)
n_k = len(ks)
# --- Rows in the subplot correspond to the MS2 scoring methods in the input data ---
if "ms2scorer" not in results.columns:
results = results.assign(ms2scorer="MS2 Scorer")
l_ms2scorer = [ms2scorer for ms2scorer, _ in results.groupby("ms2scorer")]
d_row2scorer = {s: i for i, s in enumerate(l_ms2scorer)}
n_scorer = len(l_ms2scorer)
# Create the Axes-array for plotting
fig, axrr = plt.subplots(
n_scorer, n_k, figsize=(fig_width * n_k, fig_height * n_scorer), squeeze=False, sharey=sharey, sharex="all"
)
# Get class-level colors based on superclass-level
cf_cls_stats = _aggregate_and_filter_classyfire_classes(results, min_class_support, cf_level)
LOGGER.debug(
"n_superclass = %d, n_class = %d" %
(cf_cls_stats["classyfire_superclass"].nunique(), cf_cls_stats["classyfire_class"].nunique())
)
superlevel = {}
palette = {}
order = []
if cf_level == "class":
for idx, (cf_sc, tmp) in enumerate(cf_cls_stats.groupby("classyfire_superclass")):
for cf_c in sorted(tmp["classyfire_class"].unique()):
if colormap_name == "fixed":
palette[cf_c] = MY_CLASSYFIRE_CLASSES_COLORMAP[cf_sc]
else:
palette[cf_c] = cm.get_cmap(colormap_name)(idx)
order.append(cf_c)
superlevel[cf_c] = cf_sc
elif cf_level == "superclass":
for idx, (cf_sc, _) in enumerate(cf_cls_stats.groupby("classyfire_superclass")):
if colormap_name == "fixed":
palette[cf_sc] = MY_CLASSYFIRE_CLASSES_COLORMAP[cf_sc]
else:
palette[cf_sc] = cm.get_cmap(colormap_name)(idx)
order.append(cf_sc)
else:
raise ValueError("Invalid ClassyFire level: '%s'" % cf_level)
# Plot
results_out = []
for ms2scorer, res_sub in results.groupby("ms2scorer"):
for ax_col_idx, k in enumerate(ks):
# Get the axis to draw in
ax = axrr[d_row2scorer[ms2scorer], ax_col_idx]
# Compute the top-k accuracies for Only MS and MS + RT
_res_sub = []
for rep in range(n_samples):
_res_sub.append(
res_sub
.sample(frac=1, random_state=rep)
.drop_duplicates("correct_structure")
.groupby("classyfire_%s" % cf_level)
.agg({
"rank_onlyms": lambda x: _get_topk(x, k, topk_method),
"rank_msplrt": lambda x: _get_topk(x, k, topk_method),
"n_cand": np.median,
"n_isomers": lambda x: "min=%d, max=%d, avg=%.1f, med=%.1f" % (
np.min(x), np.max(x), np.mean(x), np.median(x)
),
"molecule_identifier": len
})
.rename({
"rank_onlyms": "top_k_p_onlyms",
"rank_msplrt": "top_k_p_msplrt",
"molecule_identifier": "n_class_support"
}, axis=1)
.reset_index()
)
_res_sub = pd.concat(_res_sub, ignore_index=True)
# Add the top-k improvement in percentage-points
_res_sub = _res_sub.assign(top_k_p_improvement=(_res_sub["top_k_p_msplrt"] - _res_sub["top_k_p_onlyms"]))
# Filter classes without enough support
_res_sub = _res_sub[_res_sub["n_class_support"] >= min_class_support]
if len(_res_sub) == 0:
raise ValueError("No class has enough support.")
ax = sns.barplot(
data=_res_sub, x="classyfire_%s" % cf_level, y="top_k_p_improvement", ax=ax, palette=palette,
order=order, seed=1020
)
ax.grid(axis="y")
ax.hlines(0, ax.get_xlim()[0] - 1, ax.get_xlim()[1] + 1, color='k', linestyle="--")
ax.set_title("%s - top-%d" % (ms2scorer, k), fontweight="bold")
ax.bar_label(
ax.containers[0],
labels=[
"%.1f" % _l
for _l in _res_sub.groupby("classyfire_%s" % cf_level)["top_k_p_onlyms"].mean().tolist()
],
rotation=90, horizontalalignment="center", fmt="%.1f", label_type="edge", padding=10, fontsize=12
)
if d_row2scorer[ms2scorer] == (n_scorer - 1):
ax.set_xticklabels(
[
plt.Text(
_tl.get_position()[0], _tl.get_position()[1],
"%s (n=%d)" %
(
_tl.get_text(),
_res_sub[_res_sub["classyfire_%s" % cf_level] == _tl.get_text()]["n_class_support"].iloc[0]
)
)
for _tl in ax.get_xticklabels()
],
rotation=label_rot_angle, horizontalalignment="center", fontsize=12
)
ax.set_xlabel("ClassyFire: %s" % {"superclass": "Super-class", "class": "Class"}[cf_level], fontsize=12)
else:
ax.set_xlabel("")
if ax_col_idx == 0:
ax.set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
else:
ax.set_ylabel("")
results_out.append(
_res_sub
.groupby("classyfire_%s" % cf_level)
.agg({
"top_k_p_onlyms": np.mean,
"top_k_p_msplrt": np.mean,
"top_k_p_improvement": np.mean,
"n_cand": lambda x: x.iloc[0],
"n_isomers": lambda x: x.iloc[0],
"n_class_support": lambda x: x.iloc[0],
})
.assign(k=k, ms2scorer=ms2scorer)
.reset_index()
)
# Compute the average improvement into actual counts
results_out[-1]["improvement_in_n"] = \
(results_out[-1]["n_class_support"] * results_out[-1]["top_k_p_improvement"]) / 100
# Adjust y-axis range to provide enough space for the labels
_y_add = {1: 1.25, 5: 0.9, 20: 1.5}
for ax_col_idx, _k in enumerate(ks):
for ax in axrr[:, ax_col_idx]:
_y_min, _y_max = ax.get_ylim()
ax.set_ylim(_y_min - _y_add.get(_k, 0.0), _y_max)
plt.tight_layout()
return pd.concat(results_out, ignore_index=True), superlevel
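# Hypothetical usage sketch (input file name is an assumption): plot__02__a returns
# both the plotted summary table and the class -> superclass mapping.
#
#   results = pd.read_csv("rank_changes__classyfire.csv")    # hypothetical input file
#   summary, superlevel = plot__02__a(results, ks=[1, 20], cf_level="class")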
def _get_res_set(df: pd.DataFrame):
return set((
(row["eval_indx"], row["dataset"])
for index, row in df.loc[:, ["eval_indx", "dataset"]].drop_duplicates().iterrows()
))
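# Example (sketch): for a dataframe with rows (eval_indx=0, dataset="AC_003") and
# (eval_indx=1, dataset="AC_003"), _get_res_set returns {(0, "AC_003"), (1, "AC_003")},
# i.e. the set of unique evaluation-sample / dataset combinations present in the results.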
def _restrict_df(df: pd.DataFrame, res_set: set):
if df is None:
return None
df_out = [row for _, row in df.iterrows() if (row["eval_indx"], row["dataset"]) in res_set]
return pd.DataFrame(df_out)
def _process_dfs__01(res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020, raise_on_missing_results):
n_scorer = len(res__baseline)
res_sets = []
for i in range(n_scorer):
restrict_results = False
# Only MS2
_res_set_baseline = _get_res_set(res__baseline[i])
res_sets.append(_res_set_baseline)
# SSVM
_res = _get_res_set(res__ssvm[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("SSVM has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
# RT filtering
_res = _get_res_set(res__rtfilter[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("RT filtering has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
# XLogP3
_res = _get_res_set(res__xlogp3[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("XLogP3 has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
# Bach et al. (2020)
_res = _get_res_set(res__bach2020[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("Bach et al. (2020) has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
if restrict_results:
res__baseline[i] = _restrict_df(res__baseline[i], res_sets[i])
res__ssvm[i] = _restrict_df(res__ssvm[i], res_sets[i])
res__rtfilter[i] = _restrict_df(res__rtfilter[i], res_sets[i])
res__xlogp3[i] = _restrict_df(res__xlogp3[i], res_sets[i])
res__bach2020[i] = _restrict_df(res__bach2020[i], res_sets[i])
# Sort results so that the rows would match
for i in range(n_scorer):
res__baseline[i] = res__baseline[i].sort_values(by=["dataset", "eval_indx", "k"])
res__ssvm[i] = res__ssvm[i].sort_values(by=["dataset", "eval_indx", "k"])
res__rtfilter[i] = res__rtfilter[i].sort_values(by=["dataset", "eval_indx", "k"])
res__xlogp3[i] = res__xlogp3[i].sort_values(by=["dataset", "eval_indx", "k"])
res__bach2020[i] = res__bach2020[i].sort_values(by=["dataset", "eval_indx", "k"])
return res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020
def plot__01__a(
res__baseline: List[pd.DataFrame],
res__ssvm: List[pd.DataFrame],
res__rtfilter: List[pd.DataFrame],
res__xlogp3: List[pd.DataFrame],
res__bach2020: List[pd.DataFrame],
aspect: str = "landscape",
max_k: int = 20,
weighted_average: bool = False,
raise_on_missing_results: bool = True,
verbose: bool = False
):
"""
Plot comparing the top-k accuracy performance for k in {1, ..., max_k} of the different scoring methods:
- baseline: Only MS2 information is used
- ssvm: Proposed Structured Support Vector Regression (SSVM) model
- rtfilter: Candidate filtering using retention time errors
- xlogp3: Candidate re-ranking using predicted XLogP3 values
- bach2020: Retention order and MS2 score integration framework by Bach et al. 2020
For each scoring method, a list of dataframes is provided. Each DataFrame has the following structure:
k top_k_method scoring_method correct_leq_k seq_length n_models eval_indx dataset top_k_acc ds lloss_mode mol_feat mol_id ms2scorer ssvm_flavor
1 csi Only-MS2 3.000000 50 8 0 AC_003 6.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
2 csi Only-MS2 5.000000 50 8 0 AC_003 10.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
3 csi Only-MS2 7.000000 50 8 0 AC_003 14.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
4 csi Only-MS2 9.000000 50 8 0 AC_003 18.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
5 csi Only-MS2 11.000000 50 8 0 AC_003 22.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
...
whereby the "scoring_method" differs.
Each list element corresponds to a different MS2 base-scorer, e.g. CFM-ID, MetFrag, ...
:param res__baseline: list of dataframe, containing the ranking results for Only MS2.
:param res__ssvm: list of dataframe, containing the ranking results for the SSVM approach.
:param res__rtfilter: list of dataframe, containing the RT filtering results.
:param res__xlogp3: list of dataframe, containing the XLogP3 re-scoring results.
:param res__bach2020: list of dataframe, containing the results achieved by Bach et al.'s method.
:param aspect: string, indicating which layout for the plot should be used:
"landscape":
CFMID METFRAG SIRIUS
____ ____ ____
| | | | ... | | Top-k
|____| |____| |____|
____ ____ ____
| | | | ... | | Top-k improvement over the baseline
|____| |____| |____|
"portrait":
Top-k Top-k improvement over the baseline
____ ____
CFMID | | | |
|____| |____|
____ ____
METFRAG | | | |
|____| |____|
.
.
.
____ ____
SIRIUS | | | |
|____| |____|
:param max_k: scalar, what is the maximum k value for the top-k curve plot.
:param weighted_average: boolean, indicating whether the top-k accuracy should first be averaged within each
dataset and subsequently averaged across the datasets. If False, then all samples are treated equally and
simply averaged directly across all datasets.
:param raise_on_missing_results: boolean, indicating whether an error should be raised if results are missing. If
False, then only those results which are available for all scoring methods of a particular MS2 base-scorer are
considered for the plots.
:param verbose: boolean, indicating whether verbose progress information should be printed, which can be helpful
for debugging.
:return: pd.DataFrame, data shown in the plot for publication.
"""
def _acc_info_printer(baseline, other, k):
print(
"\ttop-%d: baseline = %.1f%%, other = %.1f%%, improvement = %.1f%%p, gain = %.1f%%, n = %.1f" %
(
k,
baseline["top_k_acc"][other["k"] == k],
other["top_k_acc"][other["k"] == k],
(other["top_k_acc"] - baseline["top_k_acc"])[other["k"] == k],
((other["top_k_acc"] / baseline["top_k_acc"])[other["k"] == k] - 1) * 100,
(other["correct_leq_k"] - baseline["correct_leq_k"])[other["k"] == k]
)
)
assert aspect in ["landscape", "portrait"], "Invalid aspect value: '%s'" % aspect
# Number of MS2 scorers must be equal
assert len(res__baseline) == len(res__ssvm)
assert len(res__baseline) == len(res__rtfilter)
assert len(res__baseline) == len(res__xlogp3)
assert len(res__baseline) == len(res__bach2020)
n_scorer = len(res__baseline)
# There should be only one scoring and one top-k accuracy computation method in each dataframe
for k in ["scoring_method", "top_k_method", "ms2scorer"]:
for i in range(n_scorer):
assert res__baseline[i][k].nunique() == 1
assert res__ssvm[i][k].nunique() == 1
assert res__rtfilter[i][k].nunique() == 1
assert res__xlogp3[i][k].nunique() == 1
assert res__bach2020[i][k].nunique() == 1
# For the SSVM all results should be 8 SSVM models
for i in range(n_scorer):
assert np.all(res__ssvm[i]["n_models"] == 8), "There seems to be SSVM models missing."
# Get all available results and restrict them if needed by only using the result intersection
res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020 = _process_dfs__01(
res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020, raise_on_missing_results
)
# Get a new figure
if aspect == "portrait":
_n_rows = n_scorer
_n_cols = 2
_figsize = (9, 3 * n_scorer)
else: # landscape
_n_rows = 2
_n_cols = n_scorer
_figsize = (4.5 * n_scorer, 5.75)
fig, axrr = plt.subplots(_n_rows, _n_cols, figsize=_figsize, sharex="all", sharey=False, squeeze=False)
# Set some plot properties
k_ticks = np.arange(0, max_k + 1, 5)
k_ticks[0] = 1
# For Machine Intelligence we need to provide the raw-data for the plot
res_out = []
# Plot Top-k curve
if verbose:
print("We expect 17500 result rows")
for idx, (a, b, c, d, e) in enumerate(zip(res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020)):
assert a["ms2scorer"].unique().item() == b["ms2scorer"].unique().item()
assert a["ms2scorer"].unique().item() == c["ms2scorer"].unique().item()
assert a["ms2scorer"].unique().item() == d["ms2scorer"].unique().item()
assert a["ms2scorer"].unique().item() == e["ms2scorer"].unique().item()
if verbose:
print("Rows (MS2-scorer='%s'):" % a["ms2scorer"].unique().item())
print("Number of samples: %d" % (a["k"] == 1).sum())
# Get current axis and set labels
if aspect == "portrait":
# first column
ax = axrr[idx, 0]
ax.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
ax.set_ylabel("Top-k accuracy (%)", fontsize=12)
# second column
ax2 = axrr[idx, 1]
ax2.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
ax2.set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
else:
# first row
ax = axrr[0, idx]
ax.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
axrr[0, 0].set_ylabel("Top-k accuracy (%)", fontsize=12)
# second row
ax2 = axrr[1, idx]
axrr[1, 0].set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
# Baseline
if verbose:
print("Baseline: ", len(a))
if weighted_average:
bl = a[a["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
bl = a[a["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(bl["k"], bl["top_k_acc"], where="post", label=a["scoring_method"].unique().item(), color="black")
ax2.hlines(0, 1, max_k, colors="black", label=a["scoring_method"].unique().item())
res_out += list(zip(
bl["k"], bl["top_k_acc"], [a["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# SSVM
if verbose:
print("SSVM: ", len(b))
if weighted_average:
tmp = b[b["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
tmp = b[b["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(tmp["k"], tmp["top_k_acc"], where="post", label=b["scoring_method"].unique().item(), color="blue")
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=b["scoring_method"].unique().item(),
color="blue"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [b["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# RT filtering
if verbose:
print("RT filtering: ", len(c))
if weighted_average:
tmp = c[c["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
tmp = c[c["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(tmp["k"], tmp["top_k_acc"], where="post", label=c["scoring_method"].unique().item(), color="red")
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=c["scoring_method"].unique().item(),
color="red"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [c["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# XLogP3
if verbose:
print("XLogP3: ", len(d))
if weighted_average:
tmp = d[d["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
tmp = d[d["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(tmp["k"], tmp["top_k_acc"], where="post", label=d["scoring_method"].unique().item(), color="green")
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=d["scoring_method"].unique().item(),
color="green"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [d["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# Bach et al. (2020)
if verbose:
print("Bach et al. (2020)", len(e))
if weighted_average:
tmp = e[e["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby(
"k").mean().reset_index()
else:
tmp = e[e["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(tmp["k"], tmp["top_k_acc"], where="post", label=e["scoring_method"].unique().item(), color="orange")
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=e["scoring_method"].unique().item(),
color="orange"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [e["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# Set some further axis properties
ax.set_xticks(k_ticks)
ax2.set_xticks(k_ticks)
ax.grid(axis="y")
ax2.grid(axis="y")
if (aspect == "portrait") and (idx == (n_scorer - 1)):
ax.set_xlabel("k")
ax2.set_xlabel("k")
elif aspect == "landscape":
ax2.set_xlabel("k")
# There should be only a single legend in the figure
# TODO: Would be nice to get that one below the plots
axrr[0, 0].legend()
plt.tight_layout()
return pd.DataFrame(res_out, columns=["k", "avg_top_k_acc", "scoring_method", "ms2scorer"])
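# Hypothetical usage sketch (variable names are illustrative): each argument is a list
# with one dataframe per MS2 base-scorer, all in the column layout shown above.
#
#   raw_data = plot__01__a(
#       res__baseline=baseline_dfs, res__ssvm=ssvm_dfs, res__rtfilter=rtfilter_dfs,
#       res__xlogp3=xlogp3_dfs, res__bach2020=bach2020_dfs, aspect="landscape"
#   )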
def _compute_color(baseline, other, ctype):
if ctype.startswith("gain"):
cvalue = (other / baseline) - 1
if ctype.endswith("perc"):
cvalue *= 100
elif ctype == "improvement":
cvalue = other - baseline
else:
raise ValueError("Invalid ctype: '%s'." % ctype)
return cvalue
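# Worked example (sketch): with baseline = 40.0 and other = 50.0,
#   _compute_color(40.0, 50.0, "improvement") == 10.0   (percentage points)
#   _compute_color(40.0, 50.0, "gain")        == 0.25   (relative gain)
#   _compute_color(40.0, 50.0, "gain_perc")   == 25.0   (relative gain in %)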
def _reshape_output(df_d):
return [
_df.melt(ignore_index=False, value_name="top_k_acc").reset_index().assign(ms2scorer=_ms2scorer_i, k=_k)
for (_ms2scorer_i, _k), _df in df_d.items()
]
def plot__01__b(
res__baseline: List[pd.DataFrame],
res__ssvm: List[pd.DataFrame],
res__rtfilter: List[pd.DataFrame],
res__xlogp3: List[pd.DataFrame],
res__bach2020: List[pd.DataFrame],
ks: Optional[List[int]] = None,
ctype: str = "improvement",
weighted_average: bool = False,
raise_on_missing_results: bool = True,
label_format: str = ".0f",
verbose: bool = False
):
"""
Plot to illustrate the performance difference between Only MS2 and the four (4) different score integration
approaches. The input structure is the same as for "plot__01__a".
:param res__baseline: list of dataframe, containing the ranking results for Only MS2.
:param res__ssvm: list of dataframe, containing the ranking results for the SSVM approach.
:param res__rtfilter: list of dataframe, containing the RT filtering results.
:param res__xlogp3: list of dataframe, containing the XLogP3 re-scoring results.
:param res__bach2020: list of dataframe, containing the results achieved by Bach et al.'s method.
:param ks: list of scalars, top-k values to plot. By default, the variable is set to [1, 20], which means that the
top-1 and top-20 values will be plotted.
:param ctype: string, which statistic should be encoded using the color of the heatmap plot. Choices are:
"improvement": Difference between top-k (score integration) and top-k (baseline) in percentage points.
"gain": Performance gain of top-k (score integration) over top-k (baseline)
"gain_perc": Performance gain of top-k (score integration) over top-k (baseline) in percentages
:param weighted_average: boolean, indicating whether the top-k accuracy should first be averaged within each
dataset and subsequently averaged across the datasets. If False, then all samples are treated equally and
simply averaged directly across all datasets.
:param raise_on_missing_results: boolean, indicating whether an error should be raised if results are missing. If
False, then only those results which are available for all scoring methods of a particular MS2 base-scorer are
considered for the plots.
:param label_format: string, format string for the labels. Default: Rounded to full number.
:param verbose: boolean, indicating whether verbose progress information should be printed, which can be helpful
for debugging.
:return: pd.DataFrame, data shown in the plot for publication.
"""
assert ctype in ["improvement", "gain", "gain_perc"], "Invalid ctype value: '%s'" % ctype
ctype_labels = {
"improvement": "Top-k acc. improvement (%p)",
"gain": "Performance gain",
"gain_perc": "Performance gain (%)"
}
# Total number of scoring methods in our manuscript
n_methods = 5
# Number of MS2 scorers must be equal
assert len(res__baseline) == len(res__ssvm)
assert len(res__baseline) == len(res__rtfilter)
assert len(res__baseline) == len(res__xlogp3)
assert len(res__baseline) == len(res__bach2020)
n_scorer = len(res__baseline)
# There should be only one scoring and one top-k accuracy computation method in each dataframe
for k in ["scoring_method", "top_k_method", "ms2scorer"]:
for i in range(n_scorer):
assert res__baseline[i][k].nunique() == 1
assert res__ssvm[i][k].nunique() == 1
assert res__rtfilter[i][k].nunique() == 1
assert res__xlogp3[i][k].nunique() == 1
assert res__bach2020[i][k].nunique() == 1
# For the SSVM all results should be 8 SSVM models
for i in range(n_scorer):
assert np.all(res__ssvm[i]["n_models"] == 8), "There seems to be SSVM models missing."
# Get all available results and restrict them if needed by only using the result intersection
res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020 = _process_dfs__01(
res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020, raise_on_missing_results
)
# Get number of datasets
datasets = [res__baseline[i]["dataset"].unique().tolist() for i in range(n_scorer)]
# Get a new figure
fig, axrr = plt.subplots(n_scorer, len(ks), figsize=(20, 5 * n_scorer), sharex=False, sharey="row", squeeze=False)
# Plot Top-k curve
if verbose:
print("We expect 17500 result rows")
# For Machine Intelligence we need to write out the content of the figure
_label_df = {}
_color_df = {}
# Do the plotting ...
for i, _res in enumerate(zip(res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020)):
_ms2scorer_i = _res[0]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[1]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[2]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[3]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[4]["ms2scorer"].unique().item()
if verbose:
print("Rows (MS2-scorer='%s'):" % _ms2scorer_i)
print("Number of samples: %d" % (_res[0]["k"] == 1).sum())
# Top-k accuracy matrices: (1) label matrix and (2) color encoding matrix
lab_val_mat = np.full((len(ks), n_methods, len(datasets[i]) + 1), fill_value=np.nan)
col_val_mat = np.full((len(ks), n_methods, len(datasets[i]) + 1), fill_value=np.nan)
# shape = (
# number_of_ks_to_plot,
# number_of_score_integration_methods,
# number_of_datasets_plus_avg
# )
lab_val_d = {}
for j, k in enumerate(ks):
# Get current axis
ax = axrr[i, j]
# i: Each MS2 scorer is plotted into its own row
# j: Each top-k is plotted into its own column
for l, ds in enumerate(datasets[i]):
# Top-k accuracy as label
for m in range(n_methods):
# Get the top-k values for the current dataset (= MassBank group) and the current value of k
# (top-k). This might be several values, depending on the number of evaluation samples in each
# dataset.
_top_k_values = _res[m][(_res[m]["dataset"] == ds) & (_res[m]["k"] == k)]["top_k_acc"].values
# As label, we use the average performance (WITHIN DATASET).
lab_val_mat[j, m, l] = np.mean(_top_k_values)
if not weighted_average:
lab_val_d[(j, m, l)] = _top_k_values
# Performance gain or improvement as color
for m in range(n_methods):
# Note: The first score integration method is Only MS2 (= baseline)
col_val_mat[j, m, l] = _compute_color(
baseline=lab_val_mat[j, 0, l], other=lab_val_mat[j, m, l], ctype=ctype
)
# Compute average performance (ACROSS THE DATASETS)
if weighted_average:
lab_val_mat[j, :, -1] = np.mean(lab_val_mat[j, :, :-1], axis=1)
else:
for m in range(n_methods):
lab_val_mat[j, m, -1] = np.mean(
np.concatenate(
[lab_val_d[(j, m, l)] for l in range(len(datasets[i]))]
)
)
for m in range(n_methods):
col_val_mat[j, m, -1] = _compute_color(
baseline=lab_val_mat[j, 0, -1], other=lab_val_mat[j, m, -1], ctype=ctype
)
# Wrap the matrices into dataframes
_index = pd.Index(
data=[_res[m]["scoring_method"].unique().item() for m in range(n_methods)],
name="scoring_method"
)
_columns = pd.Index(data=datasets[i] + ["AVG."], name="dataset")
_label_df[(_ms2scorer_i, k)] = pd.DataFrame(lab_val_mat[j], index=_index, columns=_columns)
_color_df[(_ms2scorer_i, k)] =
| pd.DataFrame(col_val_mat[j], index=_index, columns=_columns) | pandas.DataFrame |
'''
We want to answer the question:
For the same drug target in each dataset, what is the correlation of the response variables?
'''
import sys
sys.path.insert(1, './FULL_MODEL_001/')
import pickle
from matplotlib import pyplot as plt
import numpy as np
from config import * # params stored here
import utils
import pandas as pd
from torch.utils import data
import statsmodels.api as sm
from scipy.stats import zscore
def mean_na_remove(x):
x = x[~np.isnan(x)]
return np.mean(x)
def lm(x1, x2):
try:
x = np.linspace(min(x1),max(x1),10)
x1 = sm.add_constant(x1)
model = sm.OLS(x2, x1)
results = model.fit()
y = results.predict(sm.add_constant(x))
p = results.f_test(np.identity(2)).pvalue
return p, x, y, results.params[1]
except:
raise
return -1, [0], [0]
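# Illustrative sketch (toy data, not from the experiments): lm() fits an OLS line and
# returns the F-test p-value, a 10-point x-grid, the fitted values on that grid, and
# the slope estimate.
#
#   >>> p, x, y, slope = lm(np.array([0., 1., 2., 3.]), np.array([0.1, 1.1, 1.9, 3.2]))
#   >>> round(slope, 1)
#   1.0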
if __name__ == '__main__':
with open('./../data_pytorch/split_label_dict.pkl', 'rb') as f:
label_dict = pickle.load(f)
targets = {'dataset' : [], 'target' : [], 'response' : [], 'expr' : []}
for i,dataset in enumerate([dataset for dataset in params['RESP_TYPES']]):
test = label_dict[dataset]['test']
gen = data.DataLoader(utils.DrugExpressionDataset(test, root_dir='./../data_pytorch/tensors/', return_response_type=True), **{'batch_size':1, 'shuffle':False,'num_workers':0})
ii = 0
for X,y,resp_type,resp_selector in gen:
ii+=X.size(0)
#if ii > 1000:
# break
if ii/X.size(0) % 1000 == 0:
print(f'predicting {dataset} ...[{ii}/{len(gen.dataset)}]', end='\r')
targets['dataset'].append(dataset)
targets['target'].append(hash(str(X.numpy()[:,1])))
targets['response'].append(y.numpy()[0])
targets['expr'].append(hash(str(np.round(X.numpy()[:,0], decimals=0))))
print()
df =
| pd.DataFrame(targets) | pandas.DataFrame |
# Copyright 2019 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from moonshot import MoonshotML
from moonshot.commission import PerShareCommission
from quantrocket.fundamental import get_sharadar_fundamentals_reindexed_like
from quantrocket import get_prices
from quantrocket.master import get_securities_reindexed_like
class USStockCommission(PerShareCommission):
BROKER_COMMISSION_PER_SHARE = 0.005
class TheKitchenSinkML(MoonshotML):
CODE = "kitchensink-ml"
DB = "sharadar-us-stk-1d"
DB_FIELDS = ["Close", "Volume"]
BENCHMARK_DB = "market-1d"
SPY_SID = "FIBBG000BDTBL9"
VIX_SID = "IB13455763"
TRIN_SID = "IB26718743"
BENCHMARK = SPY_SID
DOLLAR_VOLUME_TOP_N_PCT = 60
DOLLAR_VOLUME_WINDOW = 90
MODEL = None
LOOKBACK_WINDOW = 252
COMMISSION_CLASS = USStockCommission
def prices_to_features(self, prices):
closes = prices.loc["Close"]
features = {}
print("adding fundamental features")
self.add_fundamental_features(prices, features)
print("adding quality features")
self.add_quality_features(prices, features)
print("adding price and volume features")
self.add_price_and_volume_features(prices, features)
print("adding techical indicator features")
self.add_technical_indicator_features(prices, features)
print("adding securities master features")
self.add_securities_master_features(prices, features)
print("adding market features")
self.add_market_features(prices, features)
# Target to predict: next week return
one_week_returns = (closes - closes.shift(5)) / closes.shift(5).where(closes.shift(5) > 0)
targets = one_week_returns.shift(-5)
return features, targets
def add_fundamental_features(self, prices, features):
"""
Fundamental features:
- Enterprise multiple
- various quarterly values and ratios
- various trailing-twelve month values and ratios
"""
closes = prices.loc["Close"]
# enterprise multiple
fundamentals = get_sharadar_fundamentals_reindexed_like(
closes,
fields=["EVEBIT", "EBIT"],
dimension="ART")
enterprise_multiples = fundamentals.loc["EVEBIT"]
ebits = fundamentals.loc["EBIT"]
# Ignore negative earnings
enterprise_multiples = enterprise_multiples.where(ebits > 0)
features["enterprise_multiples_ranks"] = enterprise_multiples.rank(axis=1, pct=True).fillna(0.5)
# Query quarterly fundamentals
fundamentals = get_sharadar_fundamentals_reindexed_like(
closes,
dimension="ARQ", # As-reported quarterly reports
fields=[
"CURRENTRATIO", # Current ratio
"DE", # Debt to Equity Ratio
"PB", # Price to Book Value
"TBVPS", # Tangible Asset Book Value per Share
"MARKETCAP",
])
for field in fundamentals.index.get_level_values("Field").unique():
features["{}_ranks".format(field)] = fundamentals.loc[field].rank(axis=1, pct=True).fillna(0.5)
# Query trailing-twelve-month fundamentals
fundamentals = get_sharadar_fundamentals_reindexed_like(
closes,
dimension="ART", # As-reported trailing-twelve-month reports
fields=[
"ASSETTURNOVER", # Asset Turnover
"EBITDAMARGIN", # EBITDA Margin
"EQUITYAVG", # Average Equity
"GROSSMARGIN", # Gross Margin
"NETMARGIN", # Profit Margin
"PAYOUTRATIO", # Payout Ratio
"PE", # Price Earnings Damodaran Method
"PE1", # Price to Earnings Ratio
"PS", # Price Sales (Damodaran Method)
"PS1", # Price to Sales Ratio
"ROA", # Return on Average Assets
"ROE", # Return on Average Equity
"ROS", # Return on Sales
])
for field in fundamentals.index.get_level_values("Field").unique():
features["{}_ranks".format(field)] = fundamentals.loc[field].rank(axis=1, pct=True).fillna(0.5)
def add_quality_features(self, prices, features):
"""
Adds quality features, based on the Piotroski F-score.
"""
closes = prices.loc["Close"]
# Step 1: query relevant indicators
fundamentals = get_sharadar_fundamentals_reindexed_like(
closes,
dimension="ART", # As-reported TTM reports
fields=[
"ROA", # Return on assets
"ASSETS", # Total Assets
"NCFO", # Net Cash Flow from Operations
"DE", # Debt to Equity Ratio
"CURRENTRATIO", # Current ratio
"SHARESWA", # Outstanding shares
"GROSSMARGIN", # Gross margin
"ASSETTURNOVER", # Asset turnover
])
return_on_assets = fundamentals.loc["ROA"]
total_assets = fundamentals.loc["ASSETS"]
operating_cash_flows = fundamentals.loc["NCFO"]
leverages = fundamentals.loc["DE"]
current_ratios = fundamentals.loc["CURRENTRATIO"]
shares_out = fundamentals.loc["SHARESWA"]
gross_margins = fundamentals.loc["GROSSMARGIN"]
asset_turnovers = fundamentals.loc["ASSETTURNOVER"]
# Step 2: many Piotroski F-score components compare current to previous
# values, so get DataFrames of previous values
# Step 2.a: get a boolean mask of the first day of each newly reported fiscal
# period
fundamentals = get_sharadar_fundamentals_reindexed_like(
closes,
dimension="ARQ", # As-reported quarterly reports
fields=[
"REPORTPERIOD"
])
fiscal_periods = fundamentals.loc["REPORTPERIOD"]
are_new_fiscal_periods = fiscal_periods != fiscal_periods.shift()
periods_ago = 4
# this function will be applied sid by sid and returns a Series of
# earlier fundamentals
def n_periods_ago(fundamentals_for_sid):
sid = fundamentals_for_sid.name
# remove all rows except for new fiscal periods
new_period_fundamentals = fundamentals_for_sid.where(are_new_fiscal_periods[sid]).dropna()
# Shift the desired number of periods
earlier_fundamentals = new_period_fundamentals.shift(periods_ago)
# Reindex and forward-fill to restore original shape
earlier_fundamentals = earlier_fundamentals.reindex(fundamentals_for_sid.index, method="ffill")
return earlier_fundamentals
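# Worked example (sketch): with periods_ago = 4 and quarterly reports, a value first
# reported on 2020-06-30 is aligned against the value reported four fiscal periods
# earlier (roughly 2019-06-30), forward-filled over the trading days in between.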
previous_return_on_assets = return_on_assets.apply(n_periods_ago)
previous_leverages = leverages.apply(n_periods_ago)
previous_current_ratios = current_ratios.apply(n_periods_ago)
previous_shares_out = shares_out.apply(n_periods_ago)
previous_gross_margins = gross_margins.apply(n_periods_ago)
previous_asset_turnovers = asset_turnovers.apply(n_periods_ago)
# Step 3: calculate F-Score components; each resulting component is a DataFrame
# of booleans
have_positive_return_on_assets = return_on_assets > 0
have_positive_operating_cash_flows = operating_cash_flows > 0
have_increasing_return_on_assets = return_on_assets > previous_return_on_assets
total_assets = total_assets.where(total_assets > 0) # avoid DivisionByZero errors
have_more_cash_flow_than_incomes = operating_cash_flows / total_assets > return_on_assets
have_decreasing_leverages = leverages < previous_leverages
have_increasing_current_ratios = current_ratios > previous_current_ratios
have_no_new_shares = shares_out <= previous_shares_out
have_increasing_gross_margins = gross_margins > previous_gross_margins
have_increasing_asset_turnovers = asset_turnovers > previous_asset_turnovers
# Save each boolean F score component as a feature
features["have_positive_return_on_assets"] = have_positive_return_on_assets.astype(int)
features["have_positive_operating_cash_flows"] = have_positive_operating_cash_flows.astype(int)
features["have_increasing_return_on_assets"] = have_increasing_return_on_assets.astype(int)
features["have_more_cash_flow_than_incomes"] = have_more_cash_flow_than_incomes.astype(int)
features["have_decreasing_leverages"] = have_decreasing_leverages.astype(int)
features["have_increasing_current_ratios"] = have_increasing_current_ratios.astype(int)
features["have_no_new_shares"] = have_no_new_shares.astype(int)
features["have_increasing_gross_margins"] = have_increasing_gross_margins.astype(int)
features["have_increasing_asset_turnovers"] = have_increasing_asset_turnovers.astype(int)
# Sum the components to get the F-Score and save the ranks as a feature
f_scores = (
have_positive_return_on_assets.astype(int)
+ have_positive_operating_cash_flows.astype(int)
+ have_increasing_return_on_assets.astype(int)
+ have_more_cash_flow_than_incomes.astype(int)
+ have_decreasing_leverages.astype(int)
+ have_increasing_current_ratios.astype(int)
+ have_no_new_shares.astype(int)
+ have_increasing_gross_margins.astype(int)
+ have_increasing_asset_turnovers.astype(int)
)
features["f_score_ranks"] = f_scores.rank(axis=1, pct=True).fillna(0.5)
def add_price_and_volume_features(self, prices, features):
"""
Price and volume features, or features derived from price and volume:
- return ranks
- price level
- dollar volume rank
- volatility ranks
- volatility spikes
- volume spikes
"""
closes = prices.loc["Close"]
# yearly, monthly, weekly, 2-day, daily returns ranks
one_year_returns = (closes.shift(22) - closes.shift(252)) / closes.shift(252) # exclude most recent month, per classic momentum
one_month_returns = (closes - closes.shift(22)) / closes.shift(22)
one_week_returns = (closes - closes.shift(5)) / closes.shift(5)
two_day_returns = (closes - closes.shift(2)) / closes.shift(2)
one_day_returns = closes.pct_change()
features["1yr_returns_ranks"] = one_year_returns.rank(axis=1, pct=True).fillna(0.5)
features["1mo_returns_ranks"] = one_month_returns.rank(axis=1, pct=True).fillna(0.5)
features["1wk_returns_ranks"] = one_week_returns.rank(axis=1, pct=True).fillna(0.5)
features["2d_returns_ranks"] = two_day_returns.rank(axis=1, pct=True).fillna(0.5)
features["1d_returns_ranks"] = one_day_returns.rank(axis=1, pct=True).fillna(0.5)
# whether returns were positive
features["last_1year_was_positive"] = (one_year_returns > 0).astype(int)
features["last_1month_was_positive"] = (one_month_returns > 0).astype(int)
features["last_1week_was_positive"] = (one_week_returns > 0).astype(int)
features["last_2day_was_positive"] = (two_day_returns > 0).astype(int)
features["last_1day_was_positive"] = (one_day_returns > 0).astype(int)
# price level
features["price_below_10"] = closes < 10
features["price_below_2"] = closes < 2
# dollar volume ranks
volumes = prices.loc["Volume"]
avg_dollar_volumes = (closes * volumes).rolling(63).mean()
features["dollar_volume_ranks"] = avg_dollar_volumes.rank(axis=1, ascending=True, pct=True).fillna(0.5)
# quarterly volatility ranks
quarterly_stds = closes.pct_change().rolling(window=63).std()
features["quaterly_std_ranks"] = quarterly_stds.rank(axis=1, pct=True).fillna(0.5)
# volatility spikes
volatility_1d_vs_quarter = closes.pct_change().abs() / quarterly_stds.where(quarterly_stds > 0)
features["2std_volatility_spike"] = (volatility_1d_vs_quarter >= 2).astype(int)
features["volatility_spike_ranks"] = volatility_1d_vs_quarter.rank(axis=1, pct=True).fillna(0.5)
# volume spike
avg_volumes = volumes.rolling(window=63).mean()
volume_1d_vs_quarter = volumes / avg_volumes.where(avg_volumes > 0)
features["2x_volume_spike"] = (volume_1d_vs_quarter >= 2).astype(int)
features["volume_spike_ranks"] = volume_1d_vs_quarter.rank(axis=1, pct=True).fillna(0.5)
def add_technical_indicator_features(self, prices, features):
"""
Various technical indicators:
- Bollinger bands
- RSI
- Stochastic oscillator
- Money Flow Index
"""
closes = prices.loc["Close"]
# relative position within Bollinger Bands (0 = at or below lower band, 1 = at or above upper band)
mavgs = closes.rolling(20).mean()
stds = closes.rolling(20).std()
upper_bands = mavgs + (stds * 2)
lower_bands = mavgs - (stds * 2)
# Winsorize at upper and lower bands
winsorized_closes = closes.where(closes > lower_bands, lower_bands).where(closes < upper_bands, upper_bands)
features["close_vs_bbands"] = (winsorized_closes - lower_bands) / (winsorized_closes - lower_bands)
# RSI (0-1)
returns = closes.diff()
avg_gains = returns.where(returns > 0).rolling(window=14, min_periods=1).mean()
avg_losses = returns.where(returns < 0).abs().rolling(window=14, min_periods=1).mean()
relative_strengths = avg_gains / avg_losses.where(avg_losses != 0)
features["RSI"] = 1 - (1 / (1 + relative_strengths.fillna(0.5)))
# Stochastic oscillator (0-1)
highest_highs = closes.rolling(window=14).max()
lowest_lows = closes.rolling(window=14).min()
features["stochastic"] = (closes - lowest_lows) / (highest_highs - lowest_lows)
# Money flow (similar to RSI but volume-weighted) (0-1)
money_flows = closes * prices.loc["Volume"]
positive_money_flows = money_flows.where(returns > 0).rolling(window=14, min_periods=1).sum()
negative_money_flows = money_flows.where(returns < 0).rolling(window=14, min_periods=1).sum()
money_flow_ratios = positive_money_flows / negative_money_flows.where(negative_money_flows > 0)
features["money_flow"] = 1 - (1 / (1 + money_flow_ratios.fillna(0.5)))
def add_securities_master_features(self, prices, features):
"""
Features from the securities master:
- ADR?
- sector
"""
closes = prices.loc["Close"]
securities = get_securities_reindexed_like(closes, fields=["sharadar_Category", "sharadar_Sector"])
# Is it an ADR?
categories = securities.loc["sharadar_Category"]
unique_categories = categories.iloc[0].unique()
# this dataset includes several ADR classifications, all of which start with "ADR "
features["are_adrs"] = categories.isin([cat for cat in unique_categories if cat.startswith("ADR ")]).astype(int)
# Which sector? (sectors must be one-hot encoded - see usage guide for more)
sectors = securities.loc["sharadar_Sector"]
for sector in sectors.stack().unique():
features["sector_{}".format(sector)] = (sectors == sector).astype(int)
def add_market_features(self, prices, features):
"""
Market price, volatility, and breadth, some of which are queried from a
database and some of which are calculated from the Sharadar data:
- whether S&P 500 is above or below its 200-day moving average
- where VIX falls within the range of 12 - 30
- where 10-day NYSE TRIN falls within the range of 0.5 to 2
- McClellan oscillator
- Hindenburg Omen
"""
closes = prices.loc["Close"]
# Get prices for SPY, VIX, TRIN-NYSE
market_prices = get_prices(self.BENCHMARK_DB,
fields="Close",
start_date=closes.index.min(),
end_date=closes.index.max())
market_closes = market_prices.loc["Close"]
# Is S&P above its 200-day?
spy_closes = market_closes[self.SPY_SID]
spy_200d_mavg = spy_closes.rolling(200).mean()
spy_above_200d = (spy_closes > spy_200d_mavg).astype(int)
# Must reindex like closes in case indexes differ
spy_above_200d = spy_above_200d.reindex(closes.index, method="ffill")
features["spy_above_200d"] = closes.apply(lambda x: spy_above_200d)
# VIX and TRIN don't go back as far as Sharadar data, so we may need a filler DataFrame
fillers =
| pd.DataFrame(0.5, index=closes.index, columns=closes.columns) | pandas.DataFrame |
import pandas as pd
from datetime import datetime, timedelta
class JHUData(object):
url_pattern = (
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/"
"csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-{}.csv"
)
def __init__(self, refresh_rate=30):
self.refresh_rate = timedelta(minutes=refresh_rate)
self.data_sources = {
r: self.url_pattern.format(r) for r in ["Confirmed", "Deaths", "Recovered"]
}
self.auto_refresh(force=True)
def load_data(self):
df_list = [
pd.read_csv(data).assign(Record=record)
for record, data in self.data_sources.items()
]
df =
| pd.concat(df_list, ignore_index=True) | pandas.concat |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import gzip
import os
import pathlib
import shutil
import tempfile
import unittest
import pandas as pd
from q2_cutadapt._demux import (_build_demux_command, _rename_files,
_write_barcode_fasta,
_write_empty_fastq_to_mux_barcode_in_seq_fmt)
from q2_types.multiplexed_sequences import (
MultiplexedSingleEndBarcodeInSequenceDirFmt,
MultiplexedPairedEndBarcodeInSequenceDirFmt)
from q2_types.per_sample_sequences import (
SingleLanePerSampleSingleEndFastqDirFmt,
SingleLanePerSamplePairedEndFastqDirFmt,
FastqGzFormat)
from qiime2 import Artifact, CategoricalMetadataColumn
from qiime2.util import redirected_stdio
from qiime2.plugin.testing import TestPluginBase
class TestDemuxSingle(TestPluginBase):
package = 'q2_cutadapt.tests'
def assert_demux_results(self, exp_samples_and_barcodes, obs_demuxed_art):
obs_demuxed = obs_demuxed_art.view(
SingleLanePerSampleSingleEndFastqDirFmt)
obs_demuxed_seqs = obs_demuxed.sequences.iter_views(FastqGzFormat)
zipped = zip(exp_samples_and_barcodes.iteritems(), obs_demuxed_seqs)
for (sample_id, barcode), (filename, _) in zipped:
filename = str(filename)
self.assertTrue(sample_id in filename)
self.assertTrue(barcode in filename)
def assert_untrimmed_results(self, exp, obs_untrimmed_art):
obs_untrimmed = obs_untrimmed_art.view(
MultiplexedSingleEndBarcodeInSequenceDirFmt)
obs_untrimmed = obs_untrimmed.file.view(FastqGzFormat)
obs_untrimmed = gzip.decompress(obs_untrimmed.path.read_bytes())
self.assertEqual(exp, obs_untrimmed)
def setUp(self):
super().setUp()
self.demux_single_fn = self.plugin.methods['demux_single']
muxed_sequences_fp = self.get_data_path('forward.fastq.gz')
self.muxed_sequences = Artifact.import_data(
'MultiplexedSingleEndBarcodeInSequence', muxed_sequences_fp)
def test_typical(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_all_matched(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC', 'GGGG'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
# obs_untrimmed should be empty, since everything matched
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_none_matched(self):
metadata = CategoricalMetadataColumn(
pd.Series(['TTTT'], name='Barcode',
index=pd.Index(['sample_d'], name='id')))
with redirected_stdio(stderr=os.devnull):
with self.assertRaisesRegex(ValueError, 'demultiplexed'):
self.demux_single_fn(self.muxed_sequences, metadata)
def test_error_tolerance_filtering(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAG', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
# sample_a is dropped because of a substitution error (AAAA vs AAAG)
exp_samples_and_barcodes = pd.Series(['CCCC'], index=['sample_b'])
self.assert_demux_results(exp_samples_and_barcodes, obs_demuxed_art)
self.assert_untrimmed_results(b'@id1\nAAAAACGTACGT\n+\nzzzzzzzzzzzz\n'
b'@id3\nAAAAACGTACGT\n+\nzzzzzzzzzzzz\n'
b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_error_tolerance_high_enough_to_prevent_filtering(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAG', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata,
error_rate=0.25)
# This test should yield the same results as test_typical, above
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_extra_barcode_in_metadata(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC', 'GGGG', 'TTTT'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c',
'sample_d'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
# TTTT/sample_d shouldn't be in the demuxed results, because there
# were no reads with that barcode present
exp_samples_and_barcodes = pd.Series(['AAAA', 'CCCC', 'GGGG'],
index=['sample_a', 'sample_b',
'sample_c'])
self.assert_demux_results(exp_samples_and_barcodes, obs_demuxed_art)
# obs_untrimmed should be empty, since everything matched
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_variable_length_barcodes(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAAA', 'CCCCCC', 'GGGG'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
muxed_sequences_fp = self.get_data_path('variable_length.fastq.gz')
muxed_sequences = Artifact.import_data(
'MultiplexedSingleEndBarcodeInSequence', muxed_sequences_fp)
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(muxed_sequences, metadata)
# This test should yield the same results as test_typical, above, just
# with variable length barcodes
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_batch_size(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata,
batch_size=1)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_invalid_batch_size(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with self.assertRaisesRegex(ValueError, '5.*cannot be greater.*2'):
self.demux_single_fn(self.muxed_sequences, metadata, batch_size=5)
def test_batch_size_odd_number_of_samples(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC', 'GGGG'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata,
batch_size=2)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
# obs_untrimmed should be empty, since everything matched
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_min_length(self):
metadata = CategoricalMetadataColumn(
# The third barcode is meant to completely remove the only GGGG
# coded sequence
pd.Series(['AAAA', 'CCCC', 'GGGGACGTACGT'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
obs = obs_demuxed_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
(obs_f1, _), (obs_f2, _) = obs.sequences.iter_views(FastqGzFormat)
self.assertEqual('sample_a_AAAA_L001_R1_001.fastq.gz', str(obs_f1))
self.assertEqual('sample_b_CCCC_L001_R1_001.fastq.gz', str(obs_f2))
class TestDemuxPaired(TestPluginBase):
package = 'q2_cutadapt.tests'
def assert_demux_results(self, exp_samples_and_barcodes, obs_demuxed_art):
obs_demuxed = obs_demuxed_art.view(
SingleLanePerSamplePairedEndFastqDirFmt)
obs_demuxed_seqs = obs_demuxed.sequences.iter_views(FastqGzFormat)
# Since we are working with fwd/rev reads, duplicate each list elem
exp = [x for x in exp_samples_and_barcodes.iteritems() for _ in (0, 1)]
zipped = zip(exp, obs_demuxed_seqs)
for (sample_id, barcode), (filename, _) in zipped:
filename = str(filename)
self.assertTrue(sample_id in filename)
self.assertTrue(barcode in filename)
def assert_untrimmed_results(self, exp, obs_untrimmed_art):
obs_untrimmed = obs_untrimmed_art.view(
MultiplexedPairedEndBarcodeInSequenceDirFmt)
obs_untrimmed_f = obs_untrimmed.forward_sequences.view(FastqGzFormat)
obs_untrimmed_f = gzip.decompress(obs_untrimmed_f.path.read_bytes())
self.assertEqual(exp[0], obs_untrimmed_f)
obs_untrimmed_r = obs_untrimmed.reverse_sequences.view(FastqGzFormat)
obs_untrimmed_r = gzip.decompress(obs_untrimmed_r.path.read_bytes())
self.assertEqual(exp[1], obs_untrimmed_r)
def setUp(self):
super().setUp()
self.demux_paired_fn = self.plugin.methods['demux_paired']
muxed_sequences_f_fp = self.get_data_path('forward.fastq.gz')
muxed_sequences_r_fp = self.get_data_path('reverse.fastq.gz')
with tempfile.TemporaryDirectory() as temp:
shutil.copy(muxed_sequences_f_fp, temp)
shutil.copy(muxed_sequences_r_fp, temp)
self.muxed_sequences = Artifact.import_data(
'MultiplexedPairedEndBarcodeInSequence', temp)
# Just one proof-of-concept test here - the single-end test suite
# covers the edge cases.
def test_typical(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=
|
pd.Index(['sample_a', 'sample_b'], name='id')
|
pandas.Index
|
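For reference, a minimal self-contained sketch of the pandas.Index API labelled in the record above, mirroring how the tests build their named metadata index (a sketch only, not part of the test suite):
import pandas as pd
idx = pd.Index(['sample_a', 'sample_b'], name='id')
barcodes = pd.Series(['AAAA', 'CCCC'], name='Barcode', index=idx)
assert barcodes.index.name == 'id'  # named index matching the pattern used in the tests above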
import math
import string
from typing import Optional, Sequence, Tuple
import hypothesis.strategies as st
import numpy as np
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from hypothesis import example, given, settings
import fletcher as fr
from fletcher.testing import examples
try:
# Only available in pandas 1.2+
# When this class is defined, we can also use `.str` on fletcher columns.
from pandas.core.strings.object_array import ObjectStringArrayMixin # noqa F401
_str_accessors = ["str", "fr_str"]
except ImportError:
_str_accessors = ["fr_str"]
@pytest.fixture(params=_str_accessors, scope="module")
def str_accessor(request):
return request.param
@st.composite
def string_patterns_st(draw, max_len=50) -> Tuple[Sequence[Optional[str]], str, int]:
ab_charset_st = st.sampled_from("ab")
ascii_charset_st = st.sampled_from(string.ascii_letters)
charset_st = st.sampled_from((ab_charset_st, ascii_charset_st))
charset = draw(charset_st)
fixed_pattern_st = st.sampled_from(["a", "aab", "aabaa"])
generated_pattern_st = st.text(alphabet=charset, max_size=max_len)
pattern_st = st.one_of(fixed_pattern_st, generated_pattern_st)
pattern = draw(pattern_st)
min_str_size = 0 if len(pattern) > 0 else 1
raw_str_st = st.one_of(
st.none(), st.lists(charset, min_size=min_str_size, max_size=max_len)
)
raw_seq_st = st.lists(raw_str_st, max_size=max_len)
raw_seq = draw(raw_seq_st)
for s in raw_seq:
if s is None:
continue
"""
There seems to be a bug in pandas for this edge case
>>> pd.Series(['']).str.replace('', 'abc', n=1)
0
dtype: object
But
>>> pd.Series(['']).str.replace('', 'abc')
0 abc
dtype: object
I believe the second result is the correct one and this is what the
fletcher implementation returns.
"""
max_ind = len(s) - len(pattern)
if max_ind < 0:
continue
repl_ind_st = st.integers(min_value=0, max_value=max_ind)
repl_ind_list_st = st.lists(repl_ind_st, max_size=math.ceil(max_len / 10))
repl_ind_list = draw(repl_ind_list_st)
for j in repl_ind_list:
s[j : j + len(pattern)] = pattern
seq = ["".join(s) if s is not None else None for s in raw_seq]
offset = draw(st.integers(min_value=0, max_value=len(seq)))
return (seq, pattern, offset)
string_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", "bb", None], "a"),
(["aa", "ab", "ba", "bb", None], "A"),
(["aa", "ab", "bA", "bB", None], "a"),
(["aa", "AB", "ba", "BB", None], "A"),
],
)
strip_examples = examples(
example_list=[
[],
[""],
[None],
[" "],
["\u2000"],
[" a"],
["a "],
[" a "],
# https://github.com/xhochy/fletcher/issues/174
["\xa0"],
["\u2000a\u2000"],
["\u2000\u200C\u2000"],
["\n\u200C\r"],
["\u2000\x80\u2000"],
["\t\x80\x0b"],
["\u2000\u10FFFF\u2000"],
[" \u10FFFF "],
]
+ [
[c]
for c in " \t\r\n\x1f\x1e\x1d\x1c\x0c\x0b"
"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2000\u2009\u200A\u200B\u2028\u2029\u202F\u205F"
]
+ [[chr(c)] for c in range(0x32)]
+ [[chr(c)] for c in range(0x80, 0x85)]
+ [[chr(c)] for c in range(0x200C, 0x2030)]
+ [[chr(c)] for c in range(0x2060, 0x2070)]
+ [[chr(c)] for c in range(0x10FFFE, 0x110000)],
example_kword="data",
)
def _fr_series_from_data(data, fletcher_variant, dtype=pa.string(), index=None):
arrow_data = pa.array(data, type=dtype)
if fletcher_variant == "chunked":
fr_array = fr.FletcherChunkedArray(arrow_data)
else:
fr_array = fr.FletcherContinuousArray(arrow_data)
return pd.Series(fr_array, index=index)
def _check_series_equal(result_fr, result_pd):
result_fr = result_fr.astype(result_pd.dtype)
tm.assert_series_equal(result_fr, result_pd)
def _check_str_to_t(
t, func, data, str_accessor, fletcher_variant, test_offset=0, *args, **kwargs
):
"""Check a .str. function that returns a series with type t."""
tail_len = len(data) - test_offset
error = None
try:
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, func)(*args, **kwargs)
except Exception as e:
error = e
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
if error:
# If pandas raises an exception, fletcher should do so, too.
with pytest.raises(type(error)):
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
else:
result_fr = getattr(getattr(ser_fr, str_accessor), func)(*args, **kwargs)
_check_series_equal(result_fr, result_pd)
def _check_str_to_str(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(str, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_bool(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(bool, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def _check_str_to_int(func, data, str_accessor, fletcher_variant, *args, **kwargs):
_check_str_to_t(int, func, data, str_accessor, fletcher_variant, *args, **kwargs)
def test_fr_str_accessor(fletcher_array):
data = ["a", "b"]
ser_pd = pd.Series(data)
# object series is returned
s = ser_pd.fr_str.encode("utf8")
assert s.dtype == np.dtype("O")
# test fletcher functionality and fallback to pandas
arrow_data = pa.array(data, type=pa.string())
fr_array = fletcher_array(arrow_data)
ser_fr = pd.Series(fr_array)
# pandas strings only method
s = ser_fr.fr_str.encode("utf8")
assert isinstance(s.values, fr.FletcherBaseArray)
def test_fr_str_accessor_fail(fletcher_variant):
data = [1, 2]
ser_pd = pd.Series(data)
with pytest.raises(Exception):
ser_pd.fr_str.startswith("a")
@settings(deadline=None)
@given(char=st.characters(blacklist_categories=("Cs",)))
def test_utf8_size(char):
char_bytes = char.encode("utf-8")
expected = len(char_bytes)
computed = fr.algorithms.string.get_utf8_size(char_bytes[0])
assert computed == expected
#####################################################
## String accessor methods (sorted alphabetically) ##
#####################################################
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_capitalize(data, str_accessor, fletcher_variant):
_check_str_to_str("capitalize", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_casefold(data, str_accessor, fletcher_variant):
_check_str_to_str("casefold", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_cat(data, str_accessor, fletcher_variant, fletcher_variant_2):
if any("\x00" in x for x in data if x):
# pytest.skip("pandas cannot handle \\x00 characters in tests")
# Skip is not working properly with hypothesis
return
ser_pd = pd.Series(data, dtype=str)
ser_fr = _fr_series_from_data(data, fletcher_variant)
ser_fr_other = _fr_series_from_data(data, fletcher_variant_2)
result_pd = ser_pd.str.cat(ser_pd)
result_fr = getattr(ser_fr, str_accessor).cat(ser_fr_other)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
tm.assert_series_equal(result_fr, result_pd)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
width=st.integers(min_value=0, max_value=50),
)
def test_center(data, width, str_accessor, fletcher_variant):
_check_str_to_str("center", data, str_accessor, fletcher_variant, width=width)
@string_patterns
def test_contains_no_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=False
)
@pytest.mark.parametrize(
"data, pat, expected",
[
([], "", []),
(["a", "b"], "", [True, True]),
(["aa", "Ab", "ba", "bb", None], "a", [True, False, True, False, None]),
],
)
def test_contains_no_regex_ascii(data, pat, expected, str_accessor, fletcher_variant):
if str_accessor == "str":
pytest.skip(
"return types not stable yet, might sometimes return null instead of bool"
)
return
fr_series = _fr_series_from_data(data, fletcher_variant)
fr_expected = _fr_series_from_data(expected, fletcher_variant, pa.bool_())
# Run over slices to check offset handling code
for i in range(len(data)):
ser = fr_series.tail(len(data) - i)
expected = fr_expected.tail(len(data) - i)
result = getattr(ser, str_accessor).contains(pat, regex=False)
tm.assert_series_equal(result, expected)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
def test_contains_no_regex_case_sensitive(data_tuple, str_accessor, fletcher_variant):
data, pat, test_offset = data_tuple
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
case=True,
regex=False,
)
@string_patterns
def test_contains_no_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=False,
case=False,
)
regex_patterns = pytest.mark.parametrize(
"data, pat",
[
([], ""),
(["a", "b"], ""),
(["aa", "ab", "ba"], "a"),
(["aa", "ab", "ba", None], "a"),
(["aa", "ab", "ba", None], "a$"),
(["aa", "ab", "ba", None], "^a"),
(["Aa", "ab", "ba", None], "A"),
(["aa", "AB", "ba", None], "A$"),
(["aa", "AB", "ba", None], "^A"),
],
)
@regex_patterns
def test_contains_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains", data, str_accessor, fletcher_variant, pat=pat, regex=True
)
@regex_patterns
def test_contains_regex_ignore_case(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool(
"contains",
data,
str_accessor,
fletcher_variant,
pat=pat,
regex=True,
case=False,
)
@settings(deadline=None)
@given(data_tuple=string_patterns_st())
@example(data_tuple=(["a"], "", 0), fletcher_variant="chunked")
def test_count_no_regex(data_tuple, str_accessor, fletcher_variant):
"""Check a .str. function that returns a series with type t."""
data, pat, test_offset = data_tuple
tail_len = len(data) - test_offset
ser_pd = pd.Series(data, dtype=str).tail(tail_len)
result_pd = getattr(ser_pd.str, "count")(pat=pat)
ser_fr = _fr_series_from_data(data, fletcher_variant).tail(tail_len)
kwargs = {}
if str_accessor.startswith("fr_"):
kwargs["regex"] = False
result_fr = getattr(ser_fr, str_accessor).count(pat=pat, **kwargs)
_check_series_equal(result_fr, result_pd)
@regex_patterns
def test_count_regex(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("count", data, str_accessor, fletcher_variant, pat=pat)
@string_patterns
def test_text_endswith(data, pat, str_accessor, fletcher_variant):
_check_str_to_bool("endswith", data, str_accessor, fletcher_variant, pat=pat)
def _check_extract(func, str_accessor, fletcher_variant, data, regex):
if str_accessor == "str":
pytest.skip(f"{func} is not yet dispatched to the ExtensionArray")
return
index = pd.Index(range(1, len(data) + 1))
ser_fr = _fr_series_from_data(data, fletcher_variant, index=index)
result_fr = getattr(getattr(ser_fr, str_accessor), func)(regex)
assert isinstance(result_fr[0].dtype, fr.FletcherBaseDtype)
ser_pd = pd.Series(data, index=index)
result_pd = getattr(ser_pd.str, func)(regex)
tm.assert_frame_equal(result_pd, result_fr.astype(object))
@pytest.mark.parametrize("regex", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
"data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_extract(str_accessor, fletcher_variant, data, regex):
_check_extract("extract", str_accessor, fletcher_variant, data, regex)
@pytest.mark.parametrize("regex", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
"data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_extractall(str_accessor, fletcher_variant, data, regex):
_check_extract("extractall", str_accessor, fletcher_variant, data, regex)
@string_patterns
def test_find(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("find", data, str_accessor, fletcher_variant, sub=pat)
@string_patterns
def test_findall(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("findall", data, str_accessor, fletcher_variant, pat=pat)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=10),
)
def test_get(data, n, str_accessor, fletcher_variant):
_check_str_to_str("get", data, str_accessor, fletcher_variant, i=n)
@string_patterns
def test_index(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("index", data, str_accessor, fletcher_variant, sub=pat)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_len(data, str_accessor, fletcher_variant):
_check_str_to_int("len", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=50),
)
def test_ljust(data, n, str_accessor, fletcher_variant):
_check_str_to_str("ljust", data, str_accessor, fletcher_variant, width=n)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
def test_lower(data, str_accessor, fletcher_variant):
_check_str_to_str("lower", data, str_accessor, fletcher_variant)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@strip_examples
def test_lstrip(str_accessor, fletcher_variant, data):
_do_test_text_strip(str_accessor, fletcher_variant, 1, data, strip_method="lstrip")
@pytest.mark.parametrize("case", [True, False])
@pytest.mark.parametrize("pat", ["([0-9]+)", "([0-9]+)\\+([a-z]+)*"])
@pytest.mark.parametrize(
"data", [["123+"], ["123+a"], ["123+a", "123+"], ["123+", "123+a"]]
)
def test_match(data, pat, case, str_accessor, fletcher_variant):
_check_str_to_bool(
"match", data, str_accessor, fletcher_variant, pat=pat, case=case
)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@pytest.mark.parametrize("form", ["NFC", "NFKC", "NFD", "NFKD"])
def test_normalize(data, form, str_accessor, fletcher_variant):
_check_str_to_str("normalize", data, str_accessor, fletcher_variant, form=form)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=50),
)
@pytest.mark.parametrize("side", ["left", "right", "both"])
def test_pad(data, n, side, str_accessor, fletcher_variant):
_check_str_to_str("pad", data, str_accessor, fletcher_variant, width=n, side=side)
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_partition(str_accessor, fletcher_variant, data, expand):
if not expand:
pytest.xfail(
"partition(expand=False) not supported as pyarrow cannot deal with tuples"
)
if str_accessor == "str":
pytest.xfail("string.parititon always returns a tuple")
_do_test_split(
str_accessor, fletcher_variant, data, expand, split_method="partition"
)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=10),
)
def test_repeat(data, n, str_accessor, fletcher_variant):
_check_str_to_str("repeat", data, str_accessor, fletcher_variant, repeats=n)
@settings(deadline=None)
@given(
data_tuple=string_patterns_st(),
n=st.integers(min_value=0, max_value=10),
repl=st.sampled_from(["len4", "", "z"]),
)
@example(
data_tuple=(["aababaa"], "aabaa", 0),
repl="len4",
n=1,
fletcher_variant="continuous",
)
@example(data_tuple=(["aaa"], "a", 0), repl="len4", n=1, fletcher_variant="continuous")
def test_replace_no_regex_case_sensitive(
data_tuple, repl, n, str_accessor, fletcher_variant
):
data, pat, test_offset = data_tuple
_check_str_to_str(
"replace",
data,
str_accessor,
fletcher_variant,
test_offset=test_offset,
pat=pat,
repl=repl,
n=n,
case=True,
regex=False,
)
@string_patterns
def test_rfind(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("rfind", data, str_accessor, fletcher_variant, sub=pat)
@string_patterns
def test_rindex(data, pat, str_accessor, fletcher_variant):
_check_str_to_int("index", data, str_accessor, fletcher_variant, sub=pat)
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
n=st.integers(min_value=0, max_value=50),
)
def test_rjust(data, n, str_accessor, fletcher_variant):
_check_str_to_str("rjust", data, str_accessor, fletcher_variant, width=n)
@pytest.mark.parametrize("data", [["123"], ["123+"], ["123+a+", "123+"]])
@pytest.mark.parametrize("expand", [True, False])
def test_rpartition(str_accessor, fletcher_variant, data, expand):
if not expand:
pytest.xfail(
"partition(expand=False) not supported as pyarrow cannot deal with tuples"
)
if str_accessor == "str":
pytest.xfail("string.parititon always returns a tuple")
_do_test_split(
str_accessor, fletcher_variant, data, expand, split_method="rpartition"
)
@settings(deadline=None)
@given(data=st.lists(st.one_of(st.text(), st.none())))
@strip_examples
def test_rstrip(str_accessor, fletcher_variant, data):
_do_test_text_strip(str_accessor, fletcher_variant, 1, data, strip_method="rstrip")
@settings(deadline=None)
@given(
data=st.lists(st.one_of(st.text(), st.none())),
slice_=st.tuples(st.integers(-20, 20), st.integers(-20, 20), st.integers(-20, 20)),
)
def test_slice(data, slice_, str_accessor, fletcher_variant):
if slice_[2] == 0:
pytest.raises(ValueError)
return
if data == [None] or data == [""]:
return
ser_fr = _fr_series_from_data(data, fletcher_variant)
result_fr = getattr(ser_fr, str_accessor).slice(*slice_)
result_fr = result_fr.astype(object)
# Pandas returns np.nan for NA values in cat, keep this in line
result_fr[result_fr.isna()] = np.nan
ser_pd = pd.Series(data, dtype=object)
result_pd = ser_pd.str.slice(*slice_)
|
tm.assert_series_equal(result_fr, result_pd)
|
pandas.testing.assert_series_equal
|
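A minimal sketch of pandas.testing.assert_series_equal, the API labelled above (assumes pandas >= 1.0; the data is illustrative only):
import pandas as pd
import pandas.testing as tm
left = pd.Series(["a", "b", None], dtype=object)
right = pd.Series(["a", "b", None], dtype=object)
tm.assert_series_equal(left, right)  # raises AssertionError on any value, dtype, or index mismatch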
import pandas as pd
import pytest
import helpers.unit_test as hut
import im.common.data.types as icdtyp
import im.kibot.data.load as vkdloa
class TestKibotS3DataLoader(hut.TestCase):
def setUp(self) -> None:
super().setUp()
self._s3_data_loader = vkdloa.KibotS3DataLoader()
@pytest.mark.slow
def test1(self) -> None:
df = self._s3_data_loader._read_data(
symbol="XG",
asset_class=icdtyp.AssetClass.Futures,
frequency=icdtyp.Frequency.Daily,
contract_type=icdtyp.ContractType.Continuous,
)
self.check_string(df.head(10).to_string())
@pytest.mark.skip(reason="Not implemented yet")
def test_read_data_with_start_end_ts(self) -> None:
"""
Test correctness of daily XG futures data loading with start/end timestamps.
"""
# Load data.
data = self._s3_data_loader._read_data(
symbol="XG",
asset_class=icdtyp.AssetClass.Futures,
frequency=icdtyp.Frequency.Daily,
contract_type=icdtyp.ContractType.Continuous,
start_ts=pd.to_datetime("1990-12-28 00:00:00"),
end_ts=
|
pd.to_datetime("1991-01-14 00:00:00")
|
pandas.to_datetime
|
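A minimal sketch of pandas.to_datetime as used for the start_ts/end_ts arguments above (the dates are illustrative):
import pandas as pd
start_ts = pd.to_datetime("1990-12-28 00:00:00")
end_ts = pd.to_datetime("1991-01-14 00:00:00")
assert (end_ts - start_ts).days == 17  # both values are pandas.Timestamp objects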
#!/usr/bin/env python3
"""<NAME>, Programming Assignment 4, test_regression_perceptron.py
"""
# Standard library imports
import collections as c
from copy import deepcopy
import json
from pathlib import Path
import warnings
# Third party imports
import numpy as np
import pandas as pd
from numba import jit, njit
import numba as nb
# Local imports
from p4.preprocessing import Preprocessor
from p4.perceptrons.regression_perceptron import predict, train_perceptron
from p4.utils import mse
from p4.preprocessing.split import make_splits
from p4.preprocessing.standardization import get_standardization_params, standardize, get_standardization_cols
warnings.filterwarnings('ignore')
# Define constants
TEST_DIR = Path(".").absolute()
REPO_DIR = TEST_DIR.parent
P4_DIR = REPO_DIR / "p4"
SRC_DIR = REPO_DIR / "data"
DST_DIR = REPO_DIR / "data" / "out"
DST_DIR.mkdir(exist_ok=True, parents=True)
THRESH = 0.01
K_FOLDS = 5
VAL_FRAC = 0.2
# Load data catalog and tuning params
with open(SRC_DIR / "data_catalog.json", "r") as file:
data_catalog = json.load(file)
data_catalog = {k: v for k, v in data_catalog.items() if k in ["forestfires", "machine", "abalone"]}
def test_regression_perceptron():
# Iterate over each dataset
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for dataset_name, dataset_meta in data_catalog.items():
print(dataset_name)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Preprocess dataset
preprocessor = Preprocessor(dataset_name, dataset_meta, SRC_DIR)
preprocessor.load()
preprocessor.drop()
preprocessor.identify_features_label_id()
preprocessor.replace()
preprocessor.log_transform()
preprocessor.set_data_classes()
preprocessor.impute()
preprocessor.dummy()
preprocessor.set_data_classes()
preprocessor.shuffle()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Extract feature and label columns
label = preprocessor.label
features = [x for x in preprocessor.features if x != label]
problem_class = dataset_meta["problem_class"]
data = preprocessor.data.copy()
data = data[[label] + features]
if problem_class == "classification":
data[label] = data[label].astype(int)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Assign folds
data["fold"] = make_splits(data, problem_class, label, k_folds=K_FOLDS, val_frac=None)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Validate: Iterate over each fold-run
print(f"\tValidate")
val_results_li = []
test_sets = {}
etas = {}
te_results_li = []
for fold in range(1, K_FOLDS + 1):
print(f"\t\t{fold}")
test_mask = data["fold"] == fold
test = data.copy()[test_mask].drop(axis=1, labels="fold") # We'll save the test for use later
train_val = data.copy()[~test_mask].drop(axis=1, labels="fold")
train_val["train"] = make_splits(train_val, problem_class, label, k_folds=None, val_frac=VAL_FRAC)
train_mask = train_val["train"] == 1
train = train_val.copy()[train_mask].drop(axis=1, labels="train")
val = train_val.copy()[~train_mask].drop(axis=1, labels="train")
# Get standardization parameters from training-validation set
cols = get_standardization_cols(train, features)
means, std_devs = get_standardization_params(train.copy()[cols])
# Standardize data
train = train.drop(axis=1, labels=cols).join(standardize(train[cols], means, std_devs))
val = val.drop(axis=1, labels=cols).join(standardize(val[cols], means, std_devs))
test = test.drop(axis=1, labels=cols).join(standardize(test[cols], means, std_devs)) # Save test for later
# Add bias terms
train["intercept"] = 1
val["intercept"] = 1
test["intercept"] = 1 # Save test for later
YX_tr = train.copy().astype(np.float64).values
YX_te = test.copy().astype(np.float64).values # Save test for later
YX_val = val.copy().astype(np.float64).values
Y_tr, X_tr = YX_tr[:, 0].reshape(len(YX_tr), 1), YX_tr[:, 1:]
test_sets[fold] = dict(Y_te=YX_te[:, 0].reshape(len(YX_te), 1), X_te=YX_te[:, 1:]) # Save test for later
Y_val, X_val = YX_val[:, 0].reshape(len(YX_val), 1), YX_val[:, 1:]
for eta in [0.00001, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.4, 1]:
w_tr = train_perceptron(Y_tr, X_tr, eta, thresh=THRESH)
Yhat_val = predict(X_val, w_tr)
mse_val = mse(Y_val, Yhat_val)
val_results_li.append(dict(dataset_name=dataset_name, fold=fold, eta=eta, mse_val=mse_val))
etas[(fold, eta)] = w_tr # Save etas for later
val_results = pd.DataFrame(val_results_li)
val_summary = val_results.groupby("eta")["mse_val"].mean().sort_values().to_frame()
best_eta = val_summary.index.values[0]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Test
print(f"\tTest")
for fold in range(1, K_FOLDS + 1):
print(f"\t\t{fold}")
w_tr = etas[(fold, best_eta)]
Y_te, X_te = test_sets[fold]["Y_te"], test_sets[fold]["X_te"]
Yhat_te = predict(X_te, w_tr)
mse_te = mse(Y_te, Yhat_te)
te_results_li.append(dict(problem_class=problem_class, dataset_name=dataset_name, fold=fold, mse_te=mse_te,
best_eta=best_eta))
te_results =
|
pd.DataFrame(te_results_li)
|
pandas.DataFrame
|
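For context, a minimal sketch of building a pandas.DataFrame from a list of per-fold result dicts, mirroring the te_results pattern above (the numbers are invented):
import pandas as pd
results_li = [
    dict(dataset_name="machine", fold=1, mse_te=0.42, best_eta=0.01),
    dict(dataset_name="machine", fold=2, mse_te=0.39, best_eta=0.01),
]
te_results = pd.DataFrame(results_li)
print(te_results.groupby("dataset_name")["mse_te"].mean())  # average test MSE per dataset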
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
def Kklee():
shop_id = 13
name = 'kklee'
options = Options() # enable headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # temporarily stores the current page's data; merged into dfAll when changing pages
dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
#
# If the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
options = Options() # enable headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # temporarily stores the current page's data; merged into dfAll when changing pages
dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
# If the page is out of range (not found), break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll =
|
pd.concat([dfAll, df])
|
pandas.concat
|
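A minimal sketch of the row-wise pandas.concat pattern used above to accumulate per-product rows into dfAll (the product values are invented):
import pandas as pd
dfAll = pd.DataFrame()
for title, price in [("item-1", "100"), ("item-2", "200")]:
    df = pd.DataFrame({"title": [title], "sale_price": [price]})
    dfAll = pd.concat([dfAll, df])        # append the one-row frame
dfAll = dfAll.reset_index(drop=True)      # rebuild a clean 0..n-1 index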
#
# Copyright (c) 2015-2017 EpiData, Inc.
#
from datetime import datetime, timedelta
from epidata.analytics import IMR, outliers
from epidata.context import ec
import pandas
from py4j.protocol import Py4JJavaError
from pyspark.sql import Row
from pyspark.sql import Column
import unittest
AutomatedTest = Row(
'company',
'site',
'device_group',
'tester',
'ts',
'device_name',
'test_name',
'meas_name',
'meas_datatype',
'meas_value',
'meas_unit',
'meas_status',
'meas_lower_limit',
'meas_upper_limit',
'meas_description',
'device_status',
'test_status')
ts = [datetime.fromtimestamp(1428004316.123 + x) for x in range(15)]
class IMRTest(unittest.TestCase):
def assertEqualRows(self, one, two):
if not isinstance(one, Column):
self.assertEqual(one.asDict(), two.asDict())
def assertEqualDataFrames(self, one, two):
self.assertEqual(one.count(), two.count())
for i, j in zip(one, two):
self.assertEqualRows(i, j)
def test_single_meas_dist(self):
df = ec.query_measurements_original({'company': 'Company-1',
'site': 'Site-1',
'device_group': '1000',
'tester': 'Station-1',
'meas_name': 'Meas-1'},
ts[0],
ts[4])
imr = IMR(df)
self.assertEqual([45.7, 49.1, 48.8], imr.toPandas(
).loc[:, 'meas_value'].values.tolist())
self.assertEqual(
[45.7, 49.1, 48.8], imr.toPandas().loc[:, 'I'].values.tolist())
i_mean = (45.7 + 49.1 + 48.8) / 3.0
self.assertEqual(
[i_mean] * 3, imr.toPandas().loc[:, 'I_mean'].values.tolist())
i_lcl = i_mean - 2.66 * i_mean
self.assertEqual(
[i_lcl] * 3, imr.toPandas().loc[:, 'I_LCL'].values.tolist())
i_ucl = i_mean + 2.66 * i_mean
self.assertEqual(
[i_ucl] * 3, imr.toPandas().loc[:, 'I_UCL'].values.tolist())
self.assertEqual([49.1 - 45.7, 49.1 - 48.8],
imr.toPandas().loc[:, 'MR'].values.tolist()[1:])
mr_mean = (49.1 - 45.7 + 49.1 - 48.8) / 2.0
self.assertEqual(
[mr_mean] * 3,
imr.toPandas().loc[
:,
'MR_mean'].values.tolist())
mr_lcl = 0.0
self.assertEqual(
[mr_lcl] * 3,
imr.toPandas().loc[
:,
'MR_LCL'].values.tolist())
mr_ucl = mr_mean + 3.267 * mr_mean
self.assertEqual(
[mr_ucl] * 3,
imr.toPandas().loc[
:,
'MR_UCL'].values.tolist())
def test_all_meas_dist(self):
df = ec.query_measurements_original({'company': 'Company-1',
'site': 'Site-1',
'device_group': '1000',
'tester': 'Station-1'},
ts[0],
ts[4] + timedelta(seconds=0.5))
imr = IMR(df).toPandas().drop('ts', 1)
# Compare without 'ts' column due to time representation inconsistencies
# between systems.
self.assertEqual(
' company site device_group tester device_name test_name meas_name meas_datatype meas_value meas_unit meas_status meas_lower_limit meas_upper_limit meas_description device_status test_status I I_mean I_LCL I_UCL MR MR_mean MR_LCL MR_UCL\n'
'0 Company-1 Site-1 1000 Station-1 100001 Test-1 Meas-1 None 45.7 degree C PASS 40.0 90.0 Description PASS PASS 45.7 47.866667 -79.458667 175.192 NaN 1.85 0.0 7.89395\n'
'1 Company-1 Site-1 1000 Station-1 101001 Test-1 Meas-1 None 49.1 degree C PASS 40.0 90.0 Description PASS PASS 49.1 47.866667 -79.458667 175.192 3.4 1.85 0.0 7.89395\n'
'2 Company-1 Site-1 1000 Station-1 101001 Test-1 Meas-1 None 48.8 degree C PASS 40.0 90.0 Description PASS PASS 48.8 47.866667 -79.458667 175.192 0.3 1.85 0.0 7.89395\n'
'3 Company-1 Site-1 1000 Station-1 101001 Test-1 Meas-2 None 88.8 degree C PASS 40.0 90.0 Description PASS PASS 88.8 83.200000 -138.112000 304.512 NaN 11.20 0.0 47.79040\n'
'4 Company-1 Site-1 1000 Station-1 101001 Test-1 Meas-2 None 77.6 degree C PASS 40.0 90.0 Description PASS PASS 77.6 83.200000 -138.112000 304.512 11.2 11.20 0.0 47.79040',
imr.to_string())
def test_selected_meas_dist(self):
df = ec.query_measurements_original({'company': 'Company-1',
'site': 'Site-1',
'device_group': '1000',
'tester': 'Station-1'},
ts[0],
ts[4] + timedelta(seconds=0.5))
# Filtering by meas_name before IMR and within IMR are equivalent.
self.assertEqualDataFrames(IMR(df.filter(df.meas_name == 'Meas-1')),
IMR(df, ['Meas-1']))
# Filtering that matches all the meas_names is the same as no
# filtering.
self.assertEqualDataFrames(IMR(df),
IMR(df, ['Meas-1', 'Meas-2']))
# Filtering with a single name is also supported.
self.assertEqualDataFrames(IMR(df.filter(df.meas_name == 'Meas-1')),
IMR(df, 'Meas-1'))
def test_insufficient_meas_dist(self):
df = ec.query_measurements_original({'company': 'Company-1',
'site': 'Site-1',
'device_group': '1000',
'tester': 'Station-1'},
ts[0],
# Omit the second Meas-2
# measurement.
ts[3] + timedelta(seconds=0.5))
# With the second Meas-2 measurement ommitted there won't be enough
# measurements to perform IMR.
with self.assertRaises(Py4JJavaError):
IMR(df).retrieve()
# IMR on the first measurement only is fine.
IMR(df, 'Meas-1').retrieve()
class OutliersTest(unittest.TestCase):
def test_quartile(self):
df = ec.query_measurements_original(
{
'company': 'Company-1',
'site': 'Site-1',
'device_group': '1000',
'tester': 'Station-2'},
ts[0],
ts[14] +
timedelta(
seconds=0.5)).toPandas()
expected = df.ix[10:14, :]
expected.loc[
:,
'meas_flag'] = [
'mild',
'extreme',
'mild',
'extreme']
expected.loc[:, 'meas_method'] = ['quartile']
self.assertEqual(expected.to_string(),
outliers(df, 'meas_value', 'quartile').to_string())
def test_quartile_empty(self):
# Test calling quartile on an empty DataFrame.
df = ec.query_measurements_original({'company': 'Company-1',
'site': 'Site-1',
'device_group': '1000',
'tester': 'Station-NONE'}, # No data for Station-NONE
ts[0],
ts[14] + timedelta(seconds=0.5)).toPandas()
self.assertEqual(0,
outliers(df, 'meas_value', 'quartile').shape[0])
def test_quartile_none(self):
# Test a data frame containing a missing value.
df = pandas.DataFrame({'a': [1.0] * 8 + [None, 2.0]})
expected = df.ix[9:, :]
expected.loc[:, 'meas_flag'] = ['extreme']
expected.loc[:, 'meas_method'] = ['quartile']
self.assertEqual(
expected.to_string(),
outliers(df, 'a', 'quartile').to_string())
def test_quartile_string(self):
df = pandas.DataFrame({'a': [1.0] * 8 + ['A STRING']})
self.assertEqual(0,
outliers(df, 'a', 'quartile').shape[0])
def test_quartile_numeric_object(self):
# Test numeric values within an object typed column.
df = pandas.DataFrame({'a': [1.0] * 9 + [2.0]}, dtype='object')
expected = df.ix[9:, :]
expected.loc[:, 'meas_flag'] = ['extreme']
expected.loc[:, 'meas_method'] = ['quartile']
result = outliers(df, 'a', 'quartile')
self.assertEqual(expected.to_string(), result.to_string())
self.assertTrue(all(['object', 'object', 'object'] == result.dtypes))
def test_quartile_invalid_column(self):
df = pandas.DataFrame({'a': [1.0]})
with self.assertRaises(KeyError):
outliers(df, 'INVALID_COLUMN_NAME', 'quartile')
def test_invalid_method(self):
df =
|
pandas.DataFrame({'a': [1.0]})
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# x13.py
# @Author : wanhanwan (<EMAIL>)
# @Link : ~
# @Date : 2019/11/24 上午9:53:41
"""
X13 seasonal adjustment.
Notes:
cny.csv records the dates of Chinese New Year for each year, currently up to the 2020 holiday.
x13as.exe is the X13 main program.
"""
import os
import pandas as pd
import numpy as np
from pathlib import Path
from statsmodels.tsa.x13 import x13_arima_analysis
from pandas import DataFrame, Series, Timestamp
from pandas.tseries.frequencies import to_offset
from functools import lru_cache
from QuantLib.tools import RollingResultWrapper
curr_path = Path(__file__).parent
@lru_cache()
def get_spring_val(before=10, after=7): ### builds the moving-holiday regressor; the moving-holiday dates are kept in cny.dat
data = pd.read_csv(curr_path/'cny.csv', index_col='rank')
x1=Series()
x2=Series()
x3=Series()
xx=Series(0,index=pd.date_range('1960-01-01','2030-01-01'))
for i in range(len(data)):
s=data.iloc[i]
d=s[1]
m=s[0]
y=s[2]
t=Timestamp(y,m,d)
start=t-to_offset('%dd'%before)
end=t+to_offset('%dd'%after)
xx[pd.date_range(start,end)]=1
d1=xx['%d-%d'%(y,1)].sum()/31
d2=xx['%d-%d'%(y,2)].sum()/28
d3=xx['%d-%d'%(y,3)].sum()/31
x1[Timestamp(y,1,31)]=d1
x2[Timestamp(y,2,1)+to_offset('M')]=d2
x3[Timestamp(y,3,31)]=d3
x1=x1-x1.mean()
x2=x2-x2.mean()
x3=x3-x3.mean()
xx=Series(0,index=pd.date_range('1960-01-01','2030-01-01',freq='M'))
xx[x1.index]=x1
xx[x2.index]=x2
xx[x3.index]=x3
return xx.sort_index()
a=get_spring_val()
def merge_spring_element(data):
"""
Drop the January and February values and replace February
with the mean of those two months.
"""
if not isinstance(data, (Series, DataFrame)):
raise ValueError('not a regular time series')
start = data.index[0]
gap=Timestamp(data.index[1])-
|
Timestamp(start)
|
pandas.Timestamp
|
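A minimal sketch of pandas.Timestamp arithmetic like the gap computation above (the dates are illustrative):
import pandas as pd
from pandas import Timestamp
gap = Timestamp("2000-02-29") - Timestamp("2000-01-31")
assert gap == pd.Timedelta(days=29)  # subtracting Timestamps yields a Timedelta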
"""
Tests that the file header is properly handled or inferred
during parsing for all of the parsers defined in parsers.py
"""
from collections import namedtuple
from io import StringIO
import numpy as np
import pytest
from pandas.errors import ParserError
from pandas import (
DataFrame,
Index,
MultiIndex,
)
import pandas._testing as tm
# TODO(1.4): Change me to xfails at release time
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
@skip_pyarrow
def test_read_with_bad_header(all_parsers):
parser = all_parsers
msg = r"but only \d+ lines in file"
with pytest.raises(ValueError, match=msg):
s = StringIO(",,")
parser.read_csv(s, header=[10])
def test_negative_header(all_parsers):
# see gh-27779
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
with pytest.raises(
ValueError,
match="Passing negative integer to header is invalid. "
"For no header, use header=None instead",
):
parser.read_csv(StringIO(data), header=-1)
@pytest.mark.parametrize("header", [([-1, 2, 4]), ([-5, 0])])
def test_negative_multi_index_header(all_parsers, header):
# see gh-27779
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
with pytest.raises(
ValueError, match="cannot specify multi-index header with negative integers"
):
parser.read_csv(StringIO(data), header=header)
@pytest.mark.parametrize("header", [True, False])
def test_bool_header_arg(all_parsers, header):
# see gh-6114
parser = all_parsers
data = """\
MyColumn
a
b
a
b"""
msg = "Passing a bool to header is invalid"
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), header=header)
def test_no_header_prefix(all_parsers):
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
result = parser.read_csv(StringIO(data), prefix="Field", header=None)
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
columns=["Field0", "Field1", "Field2", "Field3", "Field4"],
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_header_with_index_col(all_parsers):
parser = all_parsers
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ["A", "B", "C"]
result = parser.read_csv(StringIO(data), names=names)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result, expected)
def test_header_not_first_line(all_parsers):
parser = all_parsers
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
result = parser.read_csv(StringIO(data), header=2, index_col=0)
expected = parser.read_csv(StringIO(data2), header=0, index_col=0)
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
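A minimal sketch of the assert_frame_equal helper labelled above; note that pandas._testing is a private module used inside pandas' own test suite, with pandas.testing as the public equivalent:
import pandas as pd
import pandas._testing as tm
result = pd.DataFrame({"A": [2, 7]}, index=["foo", "bar"])
expected = pd.DataFrame({"A": [2, 7]}, index=["foo", "bar"])
tm.assert_frame_equal(result, expected)  # raises AssertionError on any mismatch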
# coding: utf-8
# In[ ]:
__author__ = '<NAME>'
# get_ipython().magic('matplotlib notebook')
# from IPython.display import set_matplotlib_formats
# set_matplotlib_formats('png', 'pdf')
# from IPython.display import Image
# from IPython.display import Math
import os
import sys
import shutil
import gdal
import pickle
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import flopy as fp
import pandas as pd
import geopandas as gp
import scipy.stats as ss
import scipy.optimize as so
from scipy.interpolate import UnivariateSpline
# from ipywidgets import interact, Dropdown
# from IPython.display import display
# In[ ]:
homes = ['../Models']
fig_dir = '../Figures'
mfpth = '../executables/MODFLOW-NWT_1.0.9/bin/MODFLOW-NWT_64.exe'
mp_exe_name = '../executables/modpath.6_0/bin/mp6.exe'
mf_start_date_str = '01/01/1900'
mp_release_date_str = '01/01/2020'
num_surf_layers = 3
num_depth_groups = 5
por = 0.20
dir_list = []
mod_list = []
i = 0
for home in homes:
if os.path.exists(home):
for dirpath, dirnames, filenames in os.walk(home):
for f in filenames:
if os.path.splitext(f)[-1] == '.nam':
mod = os.path.splitext(f)[0]
mod_list.append(mod)
dir_list.append(dirpath)
i += 1
print(' {} models read'.format(i))
# ## Read and process tracer input file from TracerLPM
# In[ ]:
# read input tracers
tracer_input_raw = pd.read_excel('../data/tracer input/Copy of TracerLPM_V_1_0B.xlsm', skiprows=3, sheetname='StoredTracerData', header=0)
col_list = ['Tracer', 'CFC-12', 'CFC-11', 'CFC-13', 'SF6', '3H', 'NO3-N']
tr_list = ['CFC-12', 'CFC-11', 'CFC-13', 'SF6', '3H', 'NO3-N']
tracer_input_df = tracer_input_raw.loc[:, col_list].copy()
# delete garbage header rows
tracer_input_df = tracer_input_df.iloc[3:, :]
# delete blank rows
tracer_input_df.dropna(axis=0, how='any', inplace=True)
# make sure all the tracer data is numeric
for col in col_list:
tracer_input_df[col] = pd.to_numeric(tracer_input_df[col])
# reverse the date order so that oldest date is first
tracer_input_df = tracer_input_df.iloc[::-1]
# interpolate decimal years to a regular time series
# first change decimal years to equally spaced time series at approximately the same frequency (monthly)
# extract the year from the tracer decimal year
year = tracer_input_df.Tracer.astype(np.int32())
# convert year to a Datetime object
dto = pd.to_datetime(year, format='%Y')
# is it a leap year?
isleap = pd.DatetimeIndex(dto).is_leap_year
# find the number of days in the year
num_days_in_year = np.where(isleap, 366, 365)
# extract the fractional part of the year using modulus division (%)
fraction_of_year = tracer_input_df.Tracer % 1
# find the number of elapsed days within each year
num_days_by_year = fraction_of_year * num_days_in_year
# make the number of days a timedelta object
td = pd.to_timedelta(num_days_by_year, unit='D')
# sum the year (converted to a datetime object) and the timedelta
# make the datetime the index
tracer_input_df.set_index(dto + td, inplace=True)
# create a regular datetime series starting the middle of each month
# the frequency approximates the average month within this time span
freq = 30.436764
freq_str = '{}D'.format(freq)
dates = pd.date_range('1850-01-01', '2020-01-01', freq=freq_str) +
|
pd.Timedelta(days=14)
|
pandas.Timedelta
|
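A minimal sketch of the date_range plus pandas.Timedelta shift used above (the round 30-day frequency here is an illustrative simplification):
import pandas as pd
dates = pd.date_range("1850-01-01", "2020-01-01", freq="30D") + pd.Timedelta(days=14)
assert dates[0] == pd.Timestamp("1850-01-15")  # every element is shifted forward by 14 days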
# Import Modulues
#==================================
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from collections import OrderedDict
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import r2_score
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.cm as cm
from matplotlib.gridspec import GridSpec
# %%===============================
# Functions
#==================================
# Split a dataset based on an attribute and an attribute value
# Sorts the Training and Testing Datasets based upon a radii size
def test_split(index, value, dataset, num):
train, test = list(), list()
for loca in index:
t=-1
for row in dataset.iloc[:,loca]:
t=t+1
if row == value:
test.append(num[t])
train = list(set(num)-set(test))
return test, train
def test_split_MF(index, value, dataset, num):
train, test = list(), list()
t=-1
for row in dataset.iloc[:,index]:
t=t+1
if value == num[t]:
test.append(t)
train = list(set(dataset.iloc[:,0])-set(test))
return test, train
def test_split_wt(index, value, dataset, num):
train, test = list(), list()
for loca in index:
t=-1
for row in dataset.iloc[:,loca]:
t=t+1
if row in value:
test.append(num[t])
train = list(set(num)-set(test))
test = list(set(num)-set(train))
return test, train
# Identifies the different unique values in a list
def searchValue(index, dataset):
seen, values = set(), list()
uniq = []
for x in dataset.iloc[:,index]:
if x not in seen:
uniq.append(x)
seen.add(x)
uniq.sort()
values = uniq
return values
# %%===============================
# Obtains Data
#==================================
#df = pd.read_excel('MachineLearning_13280.xlsx')
#df = pd.read_excel('MachineLearning_13280_Modified.xlsx')
df = pd.read_excel('Generation 0.xlsx')
df_perm = df
PF_act = df_perm.iloc[:,-1]
# Separates data
#==================================
Run = df['Run ']
ID = df['ID ']
df = df.drop(['Run '],axis=1) #Remove .stat file number
X = df.drop(['Packing_Fraction '],axis=1) #Inputs
Xt = X.drop(['ID '],axis=1) #All features
Xdist = Xt.iloc[:,:9] #No Weight Average
y = df['Packing_Fraction '] #Packing fraction, Output
num = df['ID '] #Number in excel file read under
mf1 = df.iloc[:,:-1] #Mass fraction, particle #1
X_wt = X.iloc[:, 10:] #Weigthed particle data
X_rc = X.iloc[:,20:21]
X_mvs = pd.concat([ID,X_rc], axis=1)
#==================================
# Identifies Data
#==================================
psize = searchValue(1, df) #particle sizes, P1
mfsize = searchValue(7, df) #mass fraction, MF1
wt1size = searchValue(10, df) #Weighted radii, Wr1
wt2size = searchValue(11, df) #Weighted radii, Wr2
wt3size = searchValue(12, df) #Weighted radii, Wr3
wtsize = wt1size + wt2size + wt3size
wtsize = sorted(wtsize)
kf = 10 #Split Size
splits = np.array_split(wtsize,kf) #Split Array
# %%
# =============================================================================
# MOST EXCEPTIONAL SETUP
# =============================================================================
df = df.drop(['Wt_Avg_Pt#_1_Size '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_2_Size '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_3_Size '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_1_Fraction '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_2_Fraction '],axis=1) #Remove .stat file number
df = df.drop(['Wt_Avg_Pt#_3_Fraction '],axis=1) #Remove .stat file number
df_main = df
df_main = df_main.sort_values(by='Packing_Fraction ', ascending=False)
# Main Test Train Split
# =============================================================================
cutoff = 499 #number of exceptional values
split = 0.25 #percentage used for testing
exceptional = df_main.iloc[0:cutoff, :]
normal = df_main.iloc[cutoff+1 :, :]
df_extra1 = exceptional.sample(frac=split,replace=False)
df_extra2 = exceptional[~exceptional.isin(df_extra1)].dropna()
df_norm1 = normal.sample(frac=split,replace=False)
df_norm2 = normal[~normal.isin(df_norm1)].dropna()
df_test = pd.concat([df_extra1, df_norm1]) #TESTING DATA
df_train_intermediate = pd.concat([df_extra2, df_norm2])
# Training Data Split
# =============================================================================
df_train_intermediate = df_train_intermediate.sort_values(by='Packing_Fraction ', ascending=False)
cutoff2 = int(cutoff*(1-split)) #Number of exceptional passed into training data
excep_train = df_train_intermediate.iloc[0:cutoff2, :] #remainder of exceptional
norm_train = df_train_intermediate.iloc[cutoff2+1 :, :] #remainder of normal
split2 = 0.5 #splits the data evenly
df_extra_val = excep_train.sample(frac=split2,replace=False)
df_extra_train = excep_train[~excep_train.isin(df_extra_val)].dropna()
df_norm_val = norm_train.sample(frac=split2,replace=False)
df_norm_train = norm_train[~norm_train.isin(df_norm_val)].dropna()
df_validate = pd.concat([df_extra_val, df_norm_val]) #VALIDATION DATA
#==============================================================================
df_training = pd.concat([df_extra_train, df_norm_train]) #TRAINING DATA
df_validate_y = df_validate.iloc[:,-1] #Validate Packing Fraction
df_validate = df_validate.drop(['Packing_Fraction '],axis=1) #Validate Inputs
df_validate = df_validate.drop(['ID '],axis=1)
df_test_y = df_test.iloc[:,-1] #Validate Packing Fraction
df_test = df_test.drop(['Packing_Fraction '],axis=1) #Validate Inputs
df_test = df_test.drop(['ID '],axis=1)
# %%===============================
# Evaluate Algorithm
#==================================
index = [0, 1, 2] #Index for weight averaged radii
Rclp = [0, 10**2, 10**3, 10**4, 10**5]
Rcup = [10**2, 10**3, 10**4, 10**5, 10**10]
Rcln = [0, 10**-2, 10**-3, 10**-4, 10**-5]
Rcun = [10**-2, 10**-3, 10**-4, 10**-5, 10**-10]
msplit = list(range(kf))
trainset = 0
tests = 0
stored = list()
train, test = list(), list()
#==================================
# Manual K-fold
#==================================
predictions = []
real_value = []
predictions_train = []
real_value_train = []
for dex in msplit:
value = splits[dex]
stored = stored + test
train, test = list(), list()
test, train = test_split_wt(index,value,X_wt,num)
test = [x for x in test if x not in stored]
train = list(set(num) - set(test))
if len(test)==0: #No data exists to test
break
trainset = trainset + (len(test)/len(train)*100)
tests = tests + len(test)
tes=np.float32(np.asarray(test))
tra=np.float32(np.asarray(train))
#CHANGE ARRRAY VALUE FOR VARIATION
X_train, X_test = Xdist.loc[tra,:], Xdist.loc[tes,:]
# X_train, X_test = X_wt.loc[tra,:], X_wt.loc[tes,:]
# X_train, X_test = Xt.loc[tra,:], Xt.loc[tes,:]
y_train, y_test = y.loc[tra], y.loc[tes]
X_train, X_test = X_train.iloc[:, 0:], X_test.iloc[:, 0:]
X_train=np.float32(np.asarray(X_train))
y_train=np.float32(np.asarray(y_train))
# Training
rf = RandomForestRegressor(n_estimators=100)
rf.fit(X_train, y_train)
# Validation
prediction = rf.predict(X_test)
predictions += list(prediction)
real_value += list(y_test)
prediction_train = rf.predict(X_train)
predictions_train += list(prediction_train)
real_value_train += list(y_train)
# %%===============================
#df2 = pd.read_excel('Files for Predictions.xlsx')
#run = df2['Run ']
#run = run.to_frame()
#df2 = df2.drop(['Run '],axis=1)
#
#y_predicted = rf.predict(df2)
#y_predicted = pd.DataFrame({'Packing Fraction':y_predicted})
#
## Write predicted cases
#ans = pd.concat([run,df2,y_predicted], axis=1, sort=False)
#ans.to_excel("13280Predict-Regressor.xlsx")
# Predictions
#df2 = pd.read_excel('Files for Predictions.xlsx')
df2 = pd.read_excel('Partial 2-model.xlsx')
y_predicted = rf.predict(df2)
y_predicted = pd.DataFrame({'Packing Fraction':y_predicted})
# Write predicted cases
ans =
|
pd.concat([df2,y_predicted], axis=1, sort=False)
|
pandas.concat
|
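A minimal sketch of the column-wise pandas.concat used above to join the feature frame with the predicted packing fractions (the values are invented):
import pandas as pd
features = pd.DataFrame({"radius": [1.0, 2.0]})
y_predicted = pd.DataFrame({"Packing Fraction": [0.61, 0.64]})
ans = pd.concat([features, y_predicted], axis=1, sort=False)
assert list(ans.columns) == ["radius", "Packing Fraction"]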
import pandas as pd
import numpy as np
import os
import mapping as mp
import json
import functools
import collections
import re
import warnings
# control data warnings in object instantation, see
# https://docs.python.org/3/library/warnings.html#the-warnings-filter
WARNINGS = "default"
# non vectorized, related to
# https://github.com/pandas-dev/pandas/issues/21200
if pd.__version__.startswith("0.23."):
def calc_returns(s):
return s.groupby(level=1).apply(lambda x: x.pct_change())
else:
def calc_returns(s):
return s.groupby(level=1).pct_change()
def calc_instr_returns(s, index, limit):
s = mp.util.reindex(s, index, limit)
rets = calc_returns(s).reindex(index)
return rets
class Exposures():
"""
A data container for market data on equities and futures instruments.
Handles looking up prices as well as expiry information for futures. Also
stores exchange, multiplier and tradeable name for each instrument.
"""
_asset_readers = {"future": "read_futures",
"equity": "read_equity"}
_month_map = dict(zip("FGHJKMNQUVXZ", range(1, 13)))
def __init__(self, prices, expiries, meta_data, security_mapping=None):
"""
Parameters:
-----------
prices: dict of pd.DataFrame
A dictionary of pd.DataFrames where the key represents the root
generic name for the instrument (e.g. "ES", "XIV") and the
DataFrame consists of one of the following two types.
For equities the DataFrame should contain the column "adj_close"
and have a DatetimeIndex.
For futures the DataFrame should contain the column "settle" and
have a MultiIndex where the top level is a DatetimeIndex and the
second level consists of instrument names of the form YYYYNNC, e.g.
"2017ESU".
expiries: pd.DataFrame
A pd.DataFrame with columns ["contract", "first_notice",
"last_trade"] where "first_notice" and "last_trade" must be
parseable to datetimes with format %Y-%m-%d and "contract" must be
a string in the form YYYYNNC representing the contract name, e.g.
"2007ESU".
meta_data: pd.DataFrame
A pd.DataFrame of instrument meta data, columns should be names
of root generics, e.g. ["ES", "TY", "XIV"] and index should contain
["exchange", "instrument_type", "multiplier", "generics"] where
"exchange" is a string of the exchange the instrument is traded on,
"instrument_type" is {"future", "equity"}, "multiplier" is an
integer representing the instruments multiplier and "generics" is a
list of generic names for each future and NaN for equities.
security_mapping: function
Callable which returns a market tradeable identifier, if nothing
given default is identity function.
"""
if sorted(list(prices.keys())) != sorted(list(meta_data.columns)):
raise ValueError("'meta_data' must contain data on all keys in "
"'prices'")
self._prices = dict([(key, prices[key].copy()) for key in prices])
self._expiries = self._validate_expiries(expiries)
meta_data = meta_data.copy()
meta_data.columns.name = "root_generic"
generic_futures = meta_data.loc["generics", meta_data.loc["instrument_type"] == "future"] # NOQA
meta_data = meta_data.drop("generics")
self._meta_data = meta_data
future_gnrcs = generic_futures.to_dict()
self._future_root_and_generics = future_gnrcs
roots = []
generics = []
for root in future_gnrcs:
generics.extend(future_gnrcs[root])
roots.extend(len(future_gnrcs[root]) * [root])
self._generic_to_root = dict(zip(generics, roots))
self._future_generics = tuple(sorted(generics))
self._equities = tuple(meta_data.loc[:, meta_data.loc["instrument_type"] == "equity"].columns) # NOQA
self._root_futures = tuple(meta_data.loc[:, meta_data.loc["instrument_type"] == "future"].columns) # NOQA
instruments = []
roots = []
for root in self.root_futures:
instrs = self._prices[root].index.levels[1]
instruments.extend(instrs)
roots.extend(len(instrs) * [root])
self._instrument_to_root = dict(zip(instruments, roots))
self._futures = tuple(instruments)
if security_mapping is None:
def security_mapping(x):
return x
self._to_tradeable = security_mapping
sprice, sexp = set(self.futures), set(self.expiries.contract)
extra_prices = sprice.difference(sexp)
extra_expiries = sexp.difference(sprice)
# warn the user if instantiating instance with price and expiry data
# that do not perfectly coincide
warning = ""
if extra_prices:
warning = (warning + "Futures price data without"
" expiry data:{0}\n".format(extra_prices))
if extra_expiries:
warning = (warning + "Expiry data without futures"
" price data:{0}\n".format(extra_expiries))
if warning:
with warnings.catch_warnings():
warnings.simplefilter(WARNINGS)
warnings.warn(warning)
@classmethod
def _validate_expiries(cls, expiries):
if expiries.contract.duplicated().any():
dupes = expiries.loc[expiries.contract.duplicated(keep=False)]
raise ValueError("Cannot provide multiple rows for same contract"
"in 'expiries'\n{0}".format(dupes))
matches = expiries.contract.str.match("[0-9]{4}[a-zA-Z]{2}[FGHJKMNQUVXZ]{1}$") # NOQA
if not matches.all():
raise ValueError("'expiries' contract column must have specified "
"format\n{0}".format(expiries.loc[~matches, :]))
exp = expiries.copy()
exp.loc[:, "year"] = exp.contract.str[:4].apply(lambda x: int(x))
exp.loc[:, "month"] = exp.contract.str[-1].apply(lambda x:
cls._month_map[x])
exp.loc[:, "root_generic"] = exp.contract.str[4:6]
exp.loc[:, "first_notice"] = pd.to_datetime(exp.loc[:, "first_notice"],
format="%Y-%m-%d")
exp.loc[:, "last_trade"] = pd.to_datetime(exp.loc[:, "last_trade"],
format="%Y-%m-%d")
exp = exp[["contract", "year", "month", "root_generic",
"first_notice", "last_trade"]]
return exp
def __repr__(self):
return (
"Exposures:\n"
"----------\n"
"{0}\n\n"
"Generic Futures: {1}\n"
"Equities: {2}\n").format(self.meta_data, self.future_generics,
self.equities)
@property
def prices(self):
"""
Dictionary of prices
"""
return self._prices
@property
def expiries(self):
"""
pd.DataFrame of futures instrument expiry data.
"""
return self._expiries
@property
def meta_data(self):
"""
pd.DataFrame of instrument meta data
"""
return self._meta_data
@property
def root_futures(self):
"""
Tuple root generic futures, e.g. ("ES", "TY")
"""
return self._root_futures
@property
def equities(self):
"""
Tuple of equities, e.g. ("XIV",)
"""
return self._equities
@property
def future_root_and_generics(self):
"""
Dict with key as root generic and value as list of future generics
"""
return self._future_root_and_generics
@property
def future_generics(self):
"""
Tuple of future generics, e.g. ("ES1", "TY1", "TY2")
"""
return self._future_generics
@property
def futures(self):
"""
Tuple of futures, e.g. ('1997ESU', '1997ESZ')
"""
return self._futures
def generic_to_root(self, generics):
"""
Map generic future to corresponding root future
Parameters:
-----------
generics: list
list of generic instruments
Returns:
--------
List of corresponding root names
"""
roots = []
for gnrc in generics:
roots.append(self._generic_to_root[gnrc])
return roots
def instrument_to_root(self, instruments):
"""
Map future instrument to corresponding root future
Parameters:
-----------
instruments: list
list of futures instruments
Returns:
--------
List of corresponding root names
"""
roots = []
for instr in instruments:
roots.append(self._instrument_to_root[instr])
return roots
def to_tradeable(self, instrument):
"""
Map internal instrument name defined in prices to a market tradeable
name
Parameters:
-----------
instrument: str
Name of instrument
Returns:
--------
Market tradeable identifier
"""
return self._to_tradeable(instrument)
def get_xprices(self, date, root_generics):
"""
Return pd.Series of instrument prices for a given date for a list of
root generics
Parameters:
-----------
date: pd.Timestamp
Desired time for prices
root_generics: iterable
Iterable of strings of root generics to lookup prices for. These
            correspond to the keys in the prices dictionary used to
initialize the Exposures.
Example
-------
exposures.get_xprices(pd.Timestamp("2016-01-01"), ["ES", "XIV"])
"""
futures = [f for f in root_generics if f in self.root_futures]
equities = [f for f in root_generics if f in self.equities]
vals = []
for key in futures:
s = self._prices[key].loc[(date,)].settle
vals.append(s)
for key in equities:
s = self._prices[key].loc[date]
s.index = [key]
vals.append(s)
prices = pd.concat(vals)
prices.name = pd.Timestamp(date)
return prices
@classmethod
def from_folder(cls, meta_file, data_folder, root_generics=None):
"""
        Initialize an Exposures instance from a meta information file and a
data folder.
Parameters:
-----------
meta_file: str
File path name to be parsed by Exposures.parse_meta()
data_folder: str
Folder path name to be parsed by Exposures.parse_folder()
root_generics: list or dict
If list is given subset of generic instruments to select from the
instrument meta file. If dict, dict.keys() acts as list for subset
selection and values should be lists of generics.
Returns:
--------
An Exposures instance.
"""
meta_data = cls.parse_meta(meta_file)
if root_generics is not None:
if isinstance(root_generics, list):
meta_data = meta_data.loc[:, root_generics]
else:
meta_data = meta_data.loc[:, root_generics.keys()]
for key in root_generics.keys():
meta_data.loc["generics", key] = root_generics[key]
prices, expiries = cls.parse_folder(data_folder,
meta_data.loc["instrument_type"])
return cls(prices, expiries, meta_data)
@staticmethod
def parse_meta(meta_file):
"""
Parse a json file for instrument meta data.
Parameters:
-----------
meta_file: str
File path of json file. This file should resemble json which can
be used to instantiate a pd.DataFrame used as the meta_data
            parameter to instantiate the class. An example file is shown below.
{
"ES": {"exchange": "CME", "instrument_type": "future", "multiplier": 50, "generics": ["ES1"]},
"TY": {"exchange": "CME", "instrument_type": "future", "multiplier": 1000, "generics": ["TY1"]},
"XIV": {"exchange": "NASDAQ", "instrument_type": "equity", "multiplier": 1}
}
Returns:
--------
A pd.DataFrame of instrument meta data
""" # NOQA
with open(meta_file) as fp:
meta_data = json.load(fp)
meta_data = pd.DataFrame(meta_data)
return meta_data
@classmethod
def parse_folder(cls, data_folder, instrument_types):
"""
Parse market data folder for instrument prices and expiry information.
Parameters:
-----------
data_folder: str
Folder containing market data files for equities and futures.
An example of the folder structure is shown below. Each instrument
subfolder must be consistent with Exposures.read_futures() and
Exposures.read_equities() respectively. contract_dates.csv must be
readable by Exposures.read_expiries()
marketdata/
contract_dates.csv
ES/
ESH2017.csv
ESM2017.csv
TY/
                TYH2017.csv
                TYM2017.csv
XIV/
XIV.csv
instrument_types: pd.Series
            Series whose index is the root generic instrument name and whose value is the
instrument type {'future', 'equity'}. The instrument_types.index
coincides with the folder names which will be loaded.
Returns:
--------
A tuple of a dictionary of prices and a pd.DataFrame of expiries used
for object instantiation.
"""
price_fldrs = [os.path.join(data_folder, gn)
for gn in instrument_types.index]
prices = {}
for gnrc_fldr, ast_info in zip(price_fldrs, instrument_types.items()):
root_generic, asset_type = ast_info
asset_type_reader = getattr(cls, cls._asset_readers[asset_type])
prices[root_generic] = asset_type_reader(gnrc_fldr)
expiry_file = os.path.join(data_folder, "contract_dates.csv")
futures = instrument_types.loc[instrument_types == "future"].index
expiries = cls.read_expiries(expiry_file, futures)
return (prices, expiries)
@staticmethod
def read_expiries(expiry_file, subset):
"""
Read expiry information on futures instruments. File should contain
columns ["contract", "first_notice", "last_trade"] where "first_notice"
and "last_trade" must be parseable to datetimes with format %Y-%m-%d
and "contract" must be a string in the form YYYYNNC representing the
contract name, e.g. "2007ESU".
Parameters:
-----------
expiry_file: str
csv file to read expiry data.
        subset: set
            Root generics used to filter contracts; only contracts whose
            root generic is in subset are kept.
"""
expiries = pd.read_csv(expiry_file)
expiries = expiries.loc[expiries.contract.str[4:6].isin(subset)]
return expiries
@staticmethod
def read_futures(folder, columns=["Settle"]):
"""
Read futures instrument prices.
Parameters:
-----------
folder: str
A folder containing csv files of individual futures instruments
prices. Should contain the column Settle and file names should have
            format [A-Z]{3}[1-9][0-9]{3}.csv, e.g. "ESU2007.csv"
columns: list
Columns to return from parsed csv files
Returns:
--------
A pd.DataFrame of price data.
"""
def name_func(namestr):
name = os.path.split(namestr)[-1].split('.')[0]
year = name[-4:]
code = name[2]
root_generic = name[:2]
return year + root_generic + code
files = [os.path.join(folder, f) for f in os.listdir(folder)]
bad_files = [f for f in files if not
re.match("[A-Z]{3}[1-9][0-9]{3}.csv",
os.path.split(f)[-1])]
if bad_files:
raise ValueError("The following files are not "
"properly named:\n{0}".format(bad_files))
p = mp.util.read_price_data(files, name_func)
p = p.loc[:, columns]
p.columns = [col.lower().replace(" ", "_") for col in p.columns]
return p
@staticmethod
def read_equity(folder):
"""
Read equity instrument prices.
Parameters:
-----------
folder: str
A folder containing a single csv file of equity prices. File should
contain the column "Adj Close".
Returns:
--------
A pd.DataFrame of price data.
"""
files = os.listdir(folder)
if len(files) > 1:
raise ValueError("%s should contain only one csv file" % folder)
file = os.path.join(folder, files[0])
p = pd.read_csv(file, parse_dates=True, index_col=0)
p = p.loc[:, ["Adj Close"]]
p.columns = [col.lower().replace(" ", "_") for col in p.columns]
return p
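# A small, hedged sketch of constructing an Exposures instance directly from
# in-memory frames, following the shapes documented in __init__; all values
# and the helper name below are synthetic and only illustrate the expected layout.
def _example_exposures():
    fut_idx = pd.MultiIndex.from_product(
        [pd.date_range("2017-01-02", periods=2), ["2017ESH"]],
        names=["date", "contract"]
    )
    prices = {
        "ES": pd.DataFrame({"settle": [2250.0, 2255.0]}, index=fut_idx),
        "XIV": pd.DataFrame({"adj_close": [45.0, 45.5]},
                            index=pd.date_range("2017-01-02", periods=2)),
    }
    expiries = pd.DataFrame({"contract": ["2017ESH"],
                             "first_notice": ["2017-03-17"],
                             "last_trade": ["2017-03-17"]})
    meta = pd.DataFrame(
        {"ES": ["CME", "future", 50, ["ES1"]],
         "XIV": ["NASDAQ", "equity", 1, float("nan")]},
        index=["exchange", "instrument_type", "multiplier", "generics"]
    )
    return Exposures(prices, expiries, meta)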
def validate_weights_and_rebalances(instrument_weights, rebalance_dates):
"""
Validate that the rebalance dates are compatible with the instrument
weights by checking that the set of roll periods implied by the instrument
weights are contained in the rebalance dates. Raise a ValueError if not
compatible.
Parameters
----------
instrument_weights: dictionary
Dictionary of DataFrames of instrument weights for each root
generic defining roll rules.
rebalance_dates: pd.DatetimeIndex
Dates on which to rebalance
"""
    # Relates to https://github.com/matthewgilbert/strategy/issues/3
# validate that transitions in instrument weights are in rebal_dates
for root_generic in instrument_weights:
wts = instrument_weights[root_generic]
wts = wts.sort_index().reset_index(level="contract")
# check if underlying transition matrix is different
trans = (wts.groupby("date").apply(lambda x: x.values))
trans_next = trans.shift(-1).ffill()
changes = ~np.vectorize(np.array_equal)(trans, trans_next)
instr_dts = wts.index.unique()
chng_dts = instr_dts[changes]
invalid_dates = chng_dts.difference(rebalance_dates)
if not invalid_dates.empty:
msg = ("instrument_weights['{0}'] has transition on dates "
"which are not rebalance dates:\n{1}"
.format(root_generic, invalid_dates))
raise ValueError(msg)
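# Hedged sketch of weights that pass validate_weights_and_rebalances: the only
# weight transition (2017-03-14 -> 2017-03-15) is contained in the rebalance
# dates, so no ValueError is raised. The index/column layout follows the
# mapper.mappings.roller() convention assumed elsewhere in this module, and
# the helper name is illustrative only.
def _example_validate():
    idx = pd.MultiIndex.from_tuples(
        [(pd.Timestamp("2017-03-14"), "2017ESH"),
         (pd.Timestamp("2017-03-15"), "2017ESM")],
        names=["date", "contract"]
    )
    wts = {"ES": pd.DataFrame({"ES1": [1.0, 1.0]}, index=idx)}
    rebal = pd.DatetimeIndex(["2017-03-14", "2017-03-15"])
    validate_weights_and_rebalances(wts, rebal)  # passes silently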
class Portfolio():
"""
A Class to manage simulating and generating trades for a trading strategy
on futures and equities. The main features include:
- simulations for trading in notional or discrete instrument size
- user defined roll rules
"""
def __init__(self, exposures, rebalance_dates, mtm_dates,
instrument_weights, initial_capital=100000):
"""
Parameters:
-----------
exposures: Exposures
An Exposures instance containing the asset exposures for trading
and backtesting
rebalance_dates: pd.DatetimeIndex
Dates on which to rebalance
mtm_dates: pd.DatetimeIndex
Dates on which to mark to market the portfolio
instrument_weights: dictionary
Dictionary of DataFrames of instrument weights for each root
generic defining roll rules.
initial_capital: float
Starting capital for backtest
"""
self._exposures = exposures
self._capital = initial_capital
validate_weights_and_rebalances(instrument_weights, rebalance_dates)
self._rebalance_dates = rebalance_dates
self._mtm_dates = mtm_dates
self._instrument_weights = instrument_weights
def __repr__(self):
return (
"Portfolio Initial Capital: {0}\n"
"Portfolio Exposures:\n"
"{1}\n"
"Date Range:\n"
"----------\n"
"Start: {2}\n"
"End: {3}\n"
).format(self._capital, self._exposures,
self._rebalance_dates[0], self._rebalance_dates[-1])
@property
def equities(self):
"""
Return tuple of equities defined in portfolio
"""
return self._exposures.equities
@property
def future_generics(self):
"""
Return tuple of generic futures defined in portfolio
"""
return self._exposures.future_generics
def _split_and_check_generics(self, generics):
if isinstance(generics, pd.Series):
idx = generics.index.tolist()
else:
idx = generics
futs = [f for f in idx if f in self._exposures.future_generics]
eqts = [e for e in idx if e in self._exposures.equities]
if set(futs + eqts) != set(idx):
raise ValueError("generics contains unknown values.\n"
"Received:\n {0}\n"
"Expected in set:\n {1}\n"
.format(sorted(idx), sorted(futs + eqts)))
if isinstance(generics, pd.Series):
futs = generics.loc[futs]
eqts = generics.loc[eqts]
return futs, eqts
def _split_and_check_instruments(self, instruments):
idx = instruments.index.tolist()
futs = [f for f in idx if f in self._exposures.futures]
eqts = [e for e in idx if e in self._exposures.equities]
if set(futs + eqts) != set(idx):
raise ValueError("instruments contains unknown values.\n"
"Received:\n {0}\n"
"Expected in set:\n {1}\n"
.format(sorted(idx), sorted(futs + eqts)))
return instruments.loc[futs], instruments.loc[eqts]
@property
def rebalance_dates(self):
"""
Rebalance days for trading strategy.
Returns
-------
pandas.DatetimeIndex
"""
return self._rebalance_dates
@property
def mtm_dates(self):
"""
Days for marking to market trading strategy.
Returns:
--------
pandas.DatetimeIndex
"""
return self._mtm_dates
@property
def instrument_weights(self):
"""
Dictionary of instrument weights for each root generic defining roll
rules for given dates.
Returns
-------
A dictionary of DataFrames of instrument weights indexed by root
generic, see mapper.mappings.roller()
"""
return self._instrument_weights
def generic_durations(self):
"""
Return a dictionary with root future generics as keys and
pandas.DataFrames of future generic durations.
See also: mapping.util.weighted_expiration()
"""
wts = self.instrument_weights
ltd = self._exposures.expiries.set_index("contract").loc[:, "last_trade"] # NOQA
durations = {}
for generic in wts:
durations[generic] = mp.util.weighted_expiration(wts[generic], ltd)
return durations
@functools.lru_cache(maxsize=1)
def continuous_rets(self):
"""
Calculate asset continuous returns. Continuous futures returns are
defined by the roll rules in instrument_weights().
Returns
-------
pandas.DataFrame of returns
"""
irets = {}
if len(self._exposures.root_futures) > 0:
weights = self.instrument_weights
for ast in self._exposures.root_futures:
widx = weights[ast].index
irets[ast] = calc_instr_returns(
self._exposures.prices[ast].settle, widx, limit=0
)
if irets:
futures_crets = mp.util.calc_rets(irets, weights)
else:
futures_crets = None
eprices = []
equities = self._exposures.equities
for ast in equities:
eprices.append(self._exposures.prices[ast].adj_close)
if eprices:
eprices = pd.concat(eprices, axis=1)
eprices.columns = equities
equity_rets = eprices.pct_change()
else:
equity_rets = None
crets = pd.concat([futures_crets, equity_rets], axis=1)
crets = crets.sort_index(axis=1)
return crets.loc[self.mtm_dates, :]
def simulate(self, signal, tradeables=False, rounder=None,
reinvest=True, risk_target=0.12):
"""
        Simulate the trading strategy with or without discrete trade sizes
        and with or without reinvestment of PnL. Holdings are marked to
        market for each day in mtm_dates.
Parameters
----------
signal: pd.DataFrame
Allocations to generic instruments through time. Must contain
            values for all rebalance_dates(). This is used as the input to
            trade(), or, when tradeables=False, is scaled directly by the
            risk target and capital.
tradeables: boolean
Calculate trades in notional space or use actual fixed size
contracts, i.e. discrete trade sizes
rounder: function
Function to round pd.Series contracts to integers, if None default
pd.Series.round is used.
reinvest: boolean
Whether PnL is added/subtracted from the capital, i.e. have a
capital base that is time varying versus running a constant amount
of capital.
risk_target: float
Percentage of capital risk to run, used as input to trade().
Returns
-------
Tuple of "holdings", "trades" and "pnl" which refer to a DataFrame of
notional holdings, a DataFrame of notional trades and a Series of
portfolio pnl respectively.
"""
if not signal.columns.is_unique:
raise ValueError("signal must have unique columns")
# validate signal data prior to running a simulation, avoid slow
# runtime error
rebal_dates = self.rebalance_dates
missing = ~rebal_dates.isin(signal.index)
if missing.any():
raise ValueError("'signal' must contain values for "
"dates %s" % rebal_dates[missing])
futures, _ = self._split_and_check_generics(signal.columns)
futures = set(futures)
# validate pricing data exists for at least one instrument for the
# dates where there is signal data
for ast in signal.columns:
req_price_dts = signal.loc[rebal_dates, ast].dropna().index
if ast in futures:
ast = self._exposures.generic_to_root([ast])[0]
try:
price_dts = self._exposures.prices[ast].index.levels[0]
except AttributeError:
price_dts = self._exposures.prices[ast].index
isin = req_price_dts.isin(price_dts)
if not isin.all():
raise ValueError("Price data in Exposures contained within "
"Portfolio must contain prices for "
"'rebalance_dates' when 'signal' is "
"not NaN, {0} needs prices for:"
"\n{1}\n".format(ast, req_price_dts[~isin]))
returns = self.continuous_rets()
capital = self._capital
current_exp = pd.Series(0, signal.columns)
trade_lst = []
trd_dts = []
notional_exposures = []
returns = returns.fillna(value=0)
pnls = []
crnt_instrs = 0
tradeable_dates = self.mtm_dates
for i, dt in enumerate(tradeable_dates):
# exposure from time dt - 1
daily_pnl = (current_exp * returns.loc[dt]).sum()
pnls.append(daily_pnl)
if reinvest:
capital += daily_pnl
# update exposures to time dt
current_exp = current_exp * (1 + returns.loc[dt])
if dt in rebal_dates:
if tradeables:
sig_t = signal.loc[dt].dropna()
futs, eqts = self._split_and_check_generics(sig_t.index)
rt_futs = self._exposures.generic_to_root(futs)
# call set() to avoid duplicate rt_futs for cases with
# multiple generics, e.g. ES1, ES2
prices_t = self._exposures.get_xprices(dt, set(rt_futs + eqts)) # NOQA
# this is quite hacky but needed to deal with the fact that
# weights on the same day before and after a transition are
# different
# see https://github.com/matthewgilbert/strategy/issues/1
dt_next = tradeable_dates[i + 1]
trds = self.trade(dt_next, crnt_instrs, sig_t, prices_t,
capital, risk_target, rounder)
new_exp = self.notional_exposure(dt_next, crnt_instrs,
trds, prices_t)
# account for fact that 'trds' mapped to 'new_exp'
# (generic notionals) might not span all previous generic
# holdings, which should be interpreted as having 0
# exposure to this generic now
new_exp = new_exp.reindex(current_exp.index).fillna(0)
# current_exp and new_exp might differ by epsilon because
# current_exp is based on compounded returns vs current
# prices
trd_ntl = (new_exp - current_exp).round(4)
current_exp = new_exp
crnt_instrs = trds.add(crnt_instrs, fill_value=0)
crnt_instrs = crnt_instrs.loc[crnt_instrs != 0]
else:
trd_ntl = (signal.loc[dt] * capital * risk_target -
current_exp)
current_exp = signal.loc[dt] * capital * risk_target
trade_lst.append(trd_ntl)
trd_dts.append(dt)
notional_exposures.append(current_exp)
trades = pd.concat(trade_lst, axis=1, keys=rebal_dates).T
notional_exposures = pd.concat(notional_exposures, axis=1,
keys=tradeable_dates).T
pnls = pd.Series(pnls, index=tradeable_dates)
container = collections.namedtuple("sim_result",
["holdings", "trades", "pnl"])
return container(notional_exposures, trades, pnls)
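    # Hedged usage sketch (not executed here): given a constructed Portfolio
    # `port` and a signal DataFrame indexed by date with generic columns,
    # e.g. signal = pd.DataFrame(1.0, index=port.rebalance_dates,
    #                            columns=["ES1", "XIV"]),
    # a notional backtest is run with
    #   holdings, trades, pnl = port.simulate(signal, tradeables=False)
    # and a discrete-contract backtest with tradeables=True plus a rounder.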
def trade(self, date, instrument_holdings, unit_risk_exposures, prices,
capital, risk_target, rounder=None):
"""
Generate instrument trade list.
Parameters:
-----------
date: pandas.Timestamp
Date for trade
instrument_holdings: pandas.Series
Current instrument holdings as integer number of contracts. Can
pass 0 if there are no instrument holdings.
unit_risk_exposures: pandas.Series
Unit risk exposure of desired holdings in generics
prices: pandas.Series
Prices for instruments to be traded
capital: float
Amount of capital to invest
risk_target: float
Percentage of capital risk to run
rounder: function
Function to round pd.Series contracts to integers, if None default
pd.Series.round is used.
Returns:
--------
pandas.Series of instrument trades.
"""
if rounder is None:
rounder = pd.Series.round
weights = {}
for root in self.instrument_weights:
weights[root] = self.instrument_weights[root].loc[[date], :]
# to support passing 0 as a proxy to all empty holdings
if isinstance(instrument_holdings, pd.Series):
if not instrument_holdings.index.is_unique:
raise ValueError("instrument_holdings must have unique index")
ih_fut, ih_eqt = self._split_and_check_instruments(
instrument_holdings
)
elif instrument_holdings == 0:
ih_fut, ih_eqt = (0, 0)
else:
raise TypeError("instrument_holdings must be pd.Series or 0")
dollar_desired_hlds = capital * risk_target * unit_risk_exposures
ddh_fut, ddh_eqt = self._split_and_check_generics(dollar_desired_hlds)
price_fut, price_eqt = self._split_and_check_instruments(prices)
eq_trades = rounder(ddh_eqt.divide(price_eqt) - ih_eqt)
root_futs = set(self._exposures.generic_to_root(ddh_fut.index))
weights = dict([(r, weights[r].loc[(date,)]) for r in root_futs])
root_fut_mults = self._exposures.meta_data.loc["multiplier", root_futs]
fut_mults = mp.util.get_multiplier(weights, root_fut_mults)
fut_trds = mp.util.calc_trades(ih_fut, ddh_fut, weights, price_fut,
rounder=rounder, multipliers=fut_mults)
        return pd.concat([eq_trades, fut_trds])
# -*- coding: utf-8 -*-
import sys, os
import datetime, time
from math import ceil, floor  # ceil: round up to the next integer, floor: round down
import math
import pickle
import uuid
import base64
import subprocess
from subprocess import Popen
import PyQt5
from PyQt5 import QtCore, QtGui, uic
from PyQt5 import QAxContainer
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (QApplication, QLabel, QLineEdit, QMainWindow, QDialog, QMessageBox, QProgressBar)
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
import numpy as np
from numpy import NaN, Inf, arange, isscalar, asarray, array
import pandas as pd
import pandas.io.sql as pdsql
from pandas import DataFrame, Series
# Google SpreadSheet Read/Write
import gspread  # (additional third-party module)
from oauth2client.service_account import ServiceAccountCredentials  # (additional third-party module)
from df2gspread import df2gspread as d2g  # (additional third-party module)
from string import ascii_uppercase  # list of uppercase letters
from bs4 import BeautifulSoup
import requests
import logging
import logging.handlers
import sqlite3
import telepot  # Telegram bot (additional third-party module)
from slacker import Slacker  # Slack bot (additional third-party module)
import csv
import FinanceDataReader as fdr
# Google Spreadsheet Setting *******************************
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
json_file_name = './secret/xtrader-276902-f5a8b77e2735.json'
credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file_name, scope)
gc = gspread.authorize(credentials)
# XTrader-Stocklist URL
# spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0' # Test Sheet
spreadsheet_url = 'https://docs.google.com/spreadsheets/d/1XE4sk0vDw4fE88bYMDZuJbnP4AF9CmRYHKY6fCXABw4/edit#gid=0' # Sheet
testsheet_url = 'https://docs.google.com/spreadsheets/d/1pLi849EDnjZnaYhphkLButple5bjl33TKZrCoMrim3k/edit#gid=0'
# Connect to the spreadsheet and set up the worksheets
doc = gc.open_by_url(spreadsheet_url)
doc_test = gc.open_by_url(testsheet_url)
shortterm_buy_sheet = doc.worksheet('매수모니터링')
shortterm_sell_sheet = doc.worksheet('매도모니터링')
shortterm_strategy_sheet = doc.worksheet('ST bot')
shortterm_history_sheet = doc.worksheet('매매이력')
condition_history_sheet = doc_test.worksheet('조건식이력')
price_monitoring_sheet = doc_test.worksheet('주가모니터링')
shortterm_history_cols = ['번호', '종목명', '매수가', '매수수량', '매수일', '매수전략', '매수조건', '매도가', '매도수량',
'매도일', '매도전략', '매도구간', '수익률(계산)','수익률', '수익금', '세금+수수료', '확정 수익금']
shortterm_analysis_cols = ['번호', '종목명', '우선순위', '일봉1', '일봉2', '일봉3', '일봉4', '주봉1', '월봉1', '거래량', '기관수급', '외인수급', '개인']
condition_history_cols = ['종목명', '매수가', '매수일','매도가', '매도일', '수익률(계산)', '수익률', '수익금', '세금+수수료']
# Alphabet list used to build column names when updating the Google spreadsheet
alpha_list = list(ascii_uppercase)
# SQLITE DB Setting *****************************************
DATABASE = 'stockdata.db'
def sqliteconn():
conn = sqlite3.connect(DATABASE)
return conn
# Return stock code, stock name and market type from the DB for a given stock name
def get_code(종목명체크):
    # The stock name may come with spacing or capitalization errors, so the
    # 종목명체크 column in the DB stores names lower-cased with spaces removed.
    # Normalize the name received from Google the same way and match it
    # against 종목명체크, then return the canonical 종목명 stored in the DB.
종목명체크 = 종목명체크.lower().replace(' ', '')
query = """
select 종목코드, 종목명, 시장구분
from 종목코드
where (종목명체크 = '%s')
""" % (종목명체크)
conn = sqliteconn()
df = pd.read_sql(query, con=conn)
conn.close()
return list(df[['종목코드', '종목명', '시장구분']].values)[0]
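# Hedged usage sketch: get_code looks the name up in the local sqlite DB
# (stockdata.db must exist and contain the 종목코드 table), e.g.
#   code, name, market = get_code('삼성전자')
#   # -> ('005930', '삼성전자', 'KOSPI')  (illustrative values)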
# Zero-pad the stock code when it was read in as an int and lost its leading zeros
def fix_stockcode(data):
if len(data)< 6:
for i in range(6 - len(data)):
data = '0'+data
return data
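# Minimal check of fix_stockcode behaviour: codes shorter than six characters
# are zero-padded on the left (illustrative helper, not called at import time).
def _example_fix_stockcode():
    assert fix_stockcode('5930') == '005930'
    assert fix_stockcode('005930') == '005930'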
# Import the Google spreadsheet and return it as a DataFrame
def import_googlesheet():
try:
# 1. 매수 모니터링 시트 체크 및 매수 종목 선정
row_data = shortterm_buy_sheet.get_all_values() # 구글 스프레드시트 '매수모니터링' 시트 데이터 get
# 작성 오류 체크를 위한 주요 항목의 위치(index)를 저장
idx_strategy = row_data[0].index('기본매도전략')
idx_buyprice = row_data[0].index('매수가1')
idx_sellprice = row_data[0].index('목표가')
# DB에서 받아올 종목코드와 시장 컬럼 추가
# 번호, 종목명, 매수모니터링, 비중, 시가위치, 매수가1, 매수가2, 매수가3, 기존매도전략, 목표가
row_data[0].insert(2, '종목코드')
row_data[0].insert(3, '시장')
for row in row_data[1:]:
try:
code, name, market = get_code(row[1]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
except Exception as e:
name = ''
code = ''
market = ''
print('구글 매수모니터링 시트 종목명 오류 : %s' % (row[1]))
logger.error('구글 매수모니터링 시트 오류 : %s' % (row[1]))
Telegram('[XTrader]구글 매수모니터링 시트 오류 : %s' % (row[1]))
row[1] = name # 정상 종목명으로 저장
row.insert(2, code)
row.insert(3, market)
data = pd.DataFrame(data=row_data[1:], columns=row_data[0])
# 사전 데이터 정리
data = data[(data['매수모니터링'] == '1') & (data['종목코드']!= '')]
data = data[row_data[0][:row_data[0].index('목표가')+1]]
del data['매수모니터링']
data.to_csv('%s_googlesheetdata.csv'%(datetime.date.today().strftime('%Y%m%d')), encoding='euc-kr', index=False)
# 2. 매도 모니터링 시트 체크(번호, 종목명, 보유일, 매도전략, 매도가)
row_data = shortterm_sell_sheet.get_all_values() # 구글 스프레드시트 '매도모니터링' 시트 데이터 get
# 작성 오류 체크를 위한 주요 항목의 위치(index)를 저장
idx_holding = row_data[0].index('보유일')
idx_strategy = row_data[0].index('매도전략')
idx_loss = row_data[0].index('손절가')
idx_sellprice = row_data[0].index('목표가')
if len(row_data) > 1:
for row in row_data[1:]:
try:
code, name, market = get_code(row[1]) # 종목명으로 종목코드, 종목명, 시장 받아서(get_code 함수) 추가
if row[idx_holding] == '' : raise Exception('보유일 오류')
if row[idx_strategy] == '': raise Exception('매도전략 오류')
if row[idx_loss] == '': raise Exception('손절가 오류')
if row[idx_strategy] == '4' and row[idx_sellprice] == '': raise Exception('목표가 오류')
except Exception as e:
if str(e) != '보유일 오류' and str(e) != '매도전략 오류' and str(e) != '손절가 오류'and str(e) != '목표가 오류': e = '종목명 오류'
print('구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
logger.error('구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
Telegram('[XTrader]구글 매도모니터링 시트 오류 : %s, %s' % (row[1], e))
# print(data)
print('[XTrader]구글 시트 확인 완료')
# Telegram('[XTrader]구글 시트 확인 완료')
# logger.info('[XTrader]구글 시트 확인 완료')
return data
except Exception as e:
        # if importing the Google sheet fails, read the csv backed up the last time it succeeded
print("import_googlesheet Error : %s"%e)
logger.error("import_googlesheet Error : %s"%e)
backup_file = datetime.date.today().strftime('%Y%m%d') + '_googlesheetdata.csv'
if backup_file in os.listdir():
data = pd.read_csv(backup_file, encoding='euc-kr')
data = data.fillna('')
data = data.astype(str)
data['종목코드'] = data['종목코드'].apply(fix_stockcode)
print("import googlesheet backup_file")
logger.info("import googlesheet backup_file")
return data
# Telegram Setting *****************************************
with open('./secret/telegram_token.txt', mode='r') as tokenfile:
TELEGRAM_TOKEN = tokenfile.readline().strip()
with open('./secret/chatid.txt', mode='r') as chatfile:
CHAT_ID = int(chatfile.readline().strip())
bot = telepot.Bot(TELEGRAM_TOKEN)
with open('./secret/Telegram.txt', mode='r') as tokenfile:
r = tokenfile.read()
TELEGRAM_TOKEN_yoo = r.split('\n')[0].split(', ')[1]
CHAT_ID_yoo = r.split('\n')[1].split(', ')[1]
bot_yoo = telepot.Bot(TELEGRAM_TOKEN_yoo)
telegram_enable = True
def Telegram(str, send='all'):
try:
if telegram_enable == True:
# if send == 'mc':
# bot.sendMessage(CHAT_ID, str)
# else:
# bot.sendMessage(CHAT_ID, str)
# bot_yoo.sendMessage(CHAT_ID_yoo, str)
bot.sendMessage(CHAT_ID, str)
else:
pass
except Exception as e:
Telegram('[StockTrader]Telegram Error : %s' % e, send='mc')
# Slack Setting ***********************************************
# with open('./secret/slack_token.txt', mode='r') as tokenfile:
# SLACK_TOKEN = tokenfile.readline().strip()
# slack = Slacker(SLACK_TOKEN)
# slack_enable = False
# def Slack(str):
# if slack_enable == True:
# slack.chat.post_message('#log', str)
# else:
# pass
# Calculate holding period after purchase *****************************************
today = datetime.date.today()
def holdingcal(base_date, excluded=(6, 7)):  # e.g. base_date = '2018-06-23'
    yy = int(base_date[:4])  # year
    mm = int(base_date[5:7])  # month
    dd = int(base_date[8:10])  # day
base_d = datetime.date(yy, mm, dd)
delta = 0
while base_d <= today:
if base_d.isoweekday() not in excluded:
delta += 1
base_d += datetime.timedelta(days=1)
    return delta  # the purchase day itself counts as 1 day
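# Hedged sketch: holdingcal counts weekdays (Mon-Fri by default) from the buy
# date up to and including today, so a stock bought today returns 1 on a weekday.
def _example_holdingcal():
    return holdingcal(today.strftime('%Y-%m-%d'))  # == 1 on a weekday, 0 on a weekend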
# Tick-price calculation (upper limit price, current price) *************************************
def hogacal(price, diff, market, option):
    # diff 0: tick at the upper limit price, -1: one tick below the upper limit
if option == '현재가':
cal_price = price
elif option == '상한가':
cal_price = price * 1.3
if cal_price < 1000:
hogaunit = 1
elif cal_price < 5000:
hogaunit = 5
elif cal_price < 10000:
hogaunit = 10
elif cal_price < 50000:
hogaunit = 50
elif cal_price < 100000 and market == "KOSPI":
hogaunit = 100
elif cal_price < 500000 and market == "KOSPI":
hogaunit = 500
elif cal_price >= 500000 and market == "KOSPI":
hogaunit = 1000
elif cal_price >= 50000 and market == "KOSDAQ":
hogaunit = 100
cal_price = int(cal_price / hogaunit) * hogaunit + (hogaunit * diff)
return cal_price
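# Hedged sketch of hogacal: for a KOSPI stock at 49,500 KRW the upper limit is
# 49,500 * 1.3 = 64,350, which falls in the 100-won tick band, so the rounded
# limit-price tick (diff=0) is 64,300. Illustrative helper, not called anywhere.
def _example_hogacal():
    return hogacal(49500, 0, 'KOSPI', '상한가')  # == 64300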
# Crawl the current price for each stock ******************************************
def crawler_price(code):
code = code[1:]
url = 'https://finance.naver.com/item/sise.nhn?code=%s' % (code)
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
tag = soup.find("td", {"class": "num"})
return int(tag.text.replace(',',''))
로봇거래계좌번호 = None
주문딜레이 = 0.25
초당횟수제한 = 5
## Kiwoom API constraint - reading once every 3.7 seconds has been fine so far
주문지연 = 3700 # 3.7 seconds
로봇스크린번호시작 = 9000
로봇스크린번호종료 = 9999
# Arrange data for the Table View
class PandasModel(QtCore.QAbstractTableModel):
def __init__(self, data=None, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self._data = data
if data is None:
self._data = DataFrame()
def rowCount(self, parent=None):
# return len(self._data.values)
return len(self._data.index)
def columnCount(self, parent=None):
return self._data.columns.size
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
if role == Qt.DisplayRole:
# return QtCore.QVariant(str(self._data.values[index.row()][index.column()]))
return str(self._data.values[index.row()][index.column()])
# return QtCore.QVariant()
return None
def headerData(self, column, orientation, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return self._data.columns[column]
return int(column + 1)
def update(self, data):
self._data = data
self.reset()
def reset(self):
self.beginResetModel()
# unnecessary call to actually clear data, but recommended by design guidance from Qt docs
# left blank in preliminary testing
self.endResetModel()
def flags(self, index):
return QtCore.Qt.ItemIsEnabled
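# Hedged usage sketch: PandasModel adapts a DataFrame for a QTableView, e.g.
#   model = PandasModel(DataFrame({'종목명': ['삼성전자'], '현재가': [70000]}))
#   view = QTableView(); view.setModel(model)
# (illustrative only; a QApplication must exist before creating widgets)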
# Stock-info classes used by the portfolios
# Portfolio for TradeShortTerm
class CPortStock_ShortTerm(object):
def __init__(self, 번호, 매수일, 종목코드, 종목명, 시장, 매수가, 매수조건, 보유일, 매도전략, 매도구간별조건, 매도구간=1, 매도가=0, 수량=0):
self.번호 = 번호
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.매수조건 = 매수조건
self.보유일 = 보유일
self.매도전략 = 매도전략
self.매도구간별조건 = 매도구간별조건
self.매도구간 = 매도구간
self.매도가 = 매도가
self.수량 = 수량
if self.매도전략 == '2' or self.매도전략 == '3':
self.목표도달 = False # 목표가(매도가) 도달 체크(False 상태로 구간 컷일경우 전량 매도)
self.매도조건 = '' # 구간매도 : B, 목표매도 : T
elif self.매도전략 == '4':
self.sellcount = 0
self.매도단위수량 = 0 # 전략4의 기본 매도 단위는 보유수량의 1/3
self.익절가1도달 = False
self.익절가2도달 = False
self.목표가도달 = False
# Portfolio for TradeLongTerm
class CPortStock_LongTerm(object):
def __init__(self, 매수일, 종목코드, 종목명, 시장, 매수가, 수량=0):
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.수량 = 수량
# Portfolio for the basic robot
class CPortStock(object):
def __init__(self, 매수일, 종목코드, 종목명, 시장, 매수가, 보유일, 매도전략, 매도구간=0, 매도전략변경1=False, 매도전략변경2=False, 수량=0):
self.매수일 = 매수일
self.종목코드 = 종목코드
self.종목명 = 종목명
self.시장 = 시장
self.매수가 = 매수가
self.보유일 = 보유일
self.매도전략 = 매도전략
self.매도구간 = 매도구간
self.매도전략변경1 = 매도전략변경1
self.매도전략변경2 = 매도전략변경2
self.수량 = 수량
# CTrade: base class for trading robots - talks to the OpenAPI to place orders etc.
class CTrade(object):
def __init__(self, sName, UUID, kiwoom=None, parent=None):
"""
:param sName: 로봇이름
:param UUID: 로봇구분용 id
:param kiwoom: 키움OpenAPI
:param parent: 나를 부른 부모 - 보통은 메인윈도우
"""
# print("CTrade : __init__")
self.sName = sName
self.UUID = UUID
        self.sAccount = None  # trading account number
        self.kiwoom = kiwoom
        self.parent = parent
        self.running = False  # running state
        self.portfolio = dict()  # portfolio management {'stock code': stock info}
        self.현재가 = dict()  # current price of each stock
    # Read stocks matching a saved condition-search expression
def GetCodes(self, Index, Name, Type):
logger.info("[%s]조건 검색식 종목 읽기"%(self.sName))
# self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
# self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
# self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
try:
self.getConditionLoad()
print('getload 완료')
print('조건 검색 :', Name, int(Index), Type)
codelist = self.sendCondition("0156", Name, int(Index), Type) # 선정된 검색조건식으로 바로 종목 검색
print('GetCodes :', self.codeList)
return self.codeList
except Exception as e:
print("GetCondition_Error")
print(e)
def getConditionLoad(self):
print('getConditionLoad')
self.kiwoom.dynamicCall("GetConditionLoad()")
        # the loop is exited in the receiveConditionVer() event handler
self.ConditionLoop = QEventLoop()
self.ConditionLoop.exec_()
def getConditionNameList(self):
print('getConditionNameList')
data = self.kiwoom.dynamicCall("GetConditionNameList()")
conditionList = data.split(';')
del conditionList[-1]
conditionDictionary = {}
for condition in conditionList:
key, value = condition.split('^')
conditionDictionary[int(key)] = value
# print(conditionDictionary)
return conditionDictionary
    # Query a condition-search expression
def sendCondition(self, screenNo, conditionName, conditionIndex, isRealTime):
print("CTrade : sendCondition", screenNo, conditionName, conditionIndex, isRealTime)
isRequest = self.kiwoom.dynamicCall("SendCondition(QString, QString, int, int)",
screenNo, conditionName, conditionIndex, isRealTime)
        # the loop is exited in the receiveTrCondition() event handler
        # for real-time searches the loop must not be used so that registration happens immediately
        # if self.조건검색타입 ==0:
self.ConditionLoop = QEventLoop()
self.ConditionLoop.exec_()
    # Stop a condition-search query
def sendConditionStop(self, screenNo, conditionName, conditionIndex):
# print("CTrade : sendConditionStop", screenNo, conditionName, conditionIndex)
isRequest = self.kiwoom.dynamicCall("SendConditionStop(QString, QString, int)",
screenNo, conditionName, conditionIndex)
    # Receive the stocks held in the account
def InquiryList(self, _repeat=0):
# print("CTrade : InquiryList")
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018", _repeat, '{:04d}'.format(self.sScreenNo))
self.InquiryLoop = QEventLoop() # 로봇에서 바로 쓸 수 있도록하기 위해서 계좌 조회해서 종목을 받고나서 루프해제시킴
self.InquiryLoop.exec_()
    # Request return, profit and fees for stocks sold today (daily realized P&L per stock request)
def DailyProfit(self, 금일매도종목):
_repeat = 0
# self.sAccount = 로봇거래계좌번호
# self.sScreenNo = self.ScreenNumber
시작일자 = datetime.date.today().strftime('%Y%m%d')
cnt = 1
for 종목코드 in 금일매도종목:
# print(self.sScreenNo, 종목코드, 시작일자)
self.update_cnt = len(금일매도종목) - cnt
cnt += 1
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.sAccount)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "종목코드", 종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "시작일자", 시작일자)
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "일자별종목별실현손익요청", "OPT10072",
_repeat, '{:04d}'.format(self.sScreenNo))
self.DailyProfitLoop = QEventLoop() # 로봇에서 바로 쓸 수 있도록하기 위해서 계좌 조회해서 종목을 받고나서 루프해제시킴
self.DailyProfitLoop.exec_()
    # Upload the daily realized P&L response to the Google sheet
def DailyProfitUpload(self, 매도결과):
# 매도결과 ['종목명','체결량','매입단가','체결가','당일매도손익','손익율','당일매매수수료','당일매매세금']
print(매도결과)
if self.sName == 'TradeShortTerm':
history_sheet = shortterm_history_sheet
history_cols = shortterm_history_cols
elif self.sName == 'TradeCondition':
history_sheet = condition_history_sheet
history_cols = condition_history_cols
try:
code_row = history_sheet.findall(매도결과[0])[-1].row
계산수익률 = round((int(float(매도결과[3])) / int(float(매도결과[2])) - 1) * 100, 2)
cell = alpha_list[history_cols.index('매수가')] + str(code_row) # 매입단가
history_sheet.update_acell(cell, int(float(매도결과[2])))
cell = alpha_list[history_cols.index('매도가')] + str(code_row) # 체결가
history_sheet.update_acell(cell, int(float(매도결과[3])))
cell = alpha_list[history_cols.index('수익률(계산)')] + str(code_row) # 수익률 계산
history_sheet.update_acell(cell, 계산수익률)
cell = alpha_list[history_cols.index('수익률')] + str(code_row) # 손익율
history_sheet.update_acell(cell, 매도결과[5])
cell = alpha_list[history_cols.index('수익금')] + str(code_row) # 손익율
history_sheet.update_acell(cell, int(float(매도결과[4])))
cell = alpha_list[history_cols.index('세금+수수료')] + str(code_row) # 당일매매수수료 + 당일매매세금
history_sheet.update_acell(cell, int(float(매도결과[6])) + int(float(매도결과[7])))
self.DailyProfitLoop.exit()
if self.update_cnt == 0:
print('금일 실현 손익 구글 업로드 완료')
Telegram("[StockTrader]금일 실현 손익 구글 업로드 완료")
logger.info("[StockTrader]금일 실현 손익 구글 업로드 완료")
except:
self.DailyProfitLoop.exit() # 강제 루프 해제
print('[StockTrader]CTrade:DailyProfitUpload_%s 매도 이력 없음' % 매도결과[0])
logger.error('CTrade:DailyProfitUpload_%s 매도 이력 없음' % 매도결과[0])
    # Portfolio status
def GetStatus(self):
# print("CTrade : GetStatus")
try:
result = []
for p, v in self.portfolio.items():
result.append('%s(%s)[P%s/V%s/D%s]' % (v.종목명.strip(), v.종목코드, v.매수가, v.수량, v.매수일))
return [self.__class__.__name__, self.sName, self.UUID, self.sScreenNo, self.running, len(self.portfolio), ','.join(result)]
except Exception as e:
print('CTrade_GetStatus Error', e)
logger.error('CTrade_GetStatus Error : %s' % e)
def GenScreenNO(self):
"""
        :return: generate a screen number required by the Kiwoom API
"""
# print("CTrade : GenScreenNO")
self.SmallScreenNumber += 1
if self.SmallScreenNumber > 9999:
self.SmallScreenNumber = 0
return self.sScreenNo * 10000 + self.SmallScreenNumber
def GetLoginInfo(self, tag):
"""
:param tag:
        :return: retrieve login information
"""
# print("CTrade : GetLoginInfo")
return self.kiwoom.dynamicCall('GetLoginInfo("%s")' % tag)
def KiwoomConnect(self):
"""
        :return: connect handler functions to the Kiwoom OpenAPI callbacks
"""
# print("CTrade : KiwoomConnect")
try:
self.kiwoom.OnEventConnect[int].connect(self.OnEventConnect)
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
self.kiwoom.OnReceiveChejanData[str, int, str].connect(self.OnReceiveChejanData)
self.kiwoom.OnReceiveRealData[str, str, str].connect(self.OnReceiveRealData)
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].connect(self.OnReceiveTrCondition)
self.kiwoom.OnReceiveConditionVer[int, str].connect(self.OnReceiveConditionVer)
self.kiwoom.OnReceiveRealCondition[str, str, str, str].connect(self.OnReceiveRealCondition)
except Exception as e:
print("CTrade : [%s]KiwoomConnect Error :"&(self.sName, e))
# logger.info("%s : connected" % self.sName)
def KiwoomDisConnect(self):
"""
        :return: disconnect the callback handlers
"""
# print("CTrade : KiwoomDisConnect")
try:
self.kiwoom.OnEventConnect[int].disconnect(self.OnEventConnect)
except Exception:
pass
try:
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
except Exception:
pass
try:
self.kiwoom.OnReceiveTrCondition[str, str, str, int, int].disconnect(self.OnReceiveTrCondition)
except Exception:
pass
try:
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
except Exception:
pass
try:
self.kiwoom.OnReceiveChejanData[str, int, str].disconnect(self.OnReceiveChejanData)
except Exception:
pass
try:
self.kiwoom.OnReceiveConditionVer[int, str].disconnect(self.OnReceiveConditionVer)
except Exception:
pass
try:
self.kiwoom.OnReceiveRealCondition[str, str, str, str].disconnect(self.OnReceiveRealCondition)
except Exception:
pass
try:
self.kiwoom.OnReceiveRealData[str, str, str].disconnect(self.OnReceiveRealData)
except Exception:
pass
# logger.info("%s : disconnected" % self.sName)
def KiwoomAccount(self):
"""
        :return: read account information
"""
# print("CTrade : KiwoomAccount")
ACCOUNT_CNT = self.GetLoginInfo('ACCOUNT_CNT')
ACC_NO = self.GetLoginInfo('ACCNO')
self.account = ACC_NO.split(';')[0:-1]
self.kiwoom.dynamicCall('SetInputValue(Qstring, Qstring)', "계좌번호", self.account[0])
self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "d+2예수금요청", "opw00001", 0, '{:04d}'.format(self.sScreenNo))
self.depositLoop = QEventLoop() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
self.depositLoop.exec_()
# logger.debug("보유 계좌수: %s 계좌번호: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO))
def KiwoomSendOrder(self, sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo):
"""
OpenAPI 메뉴얼 참조
:param sRQName:
:param sScreenNo:
:param sAccNo:
:param nOrderType:
:param sCode:
:param nQty:
:param nPrice:
:param sHogaGb:
:param sOrgOrderNo:
:return:
"""
# print("CTrade : KiwoomSendOrder")
try:
order = self.kiwoom.dynamicCall(
'SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)',
[sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo])
return order
except Exception as e:
print('CTrade_KiwoomSendOrder Error ', e)
Telegram('[StockTrader]CTrade_KiwoomSendOrder Error: %s' % e, send='mc')
logger.error('CTrade_KiwoomSendOrder Error : %s' % e)
    # -Order/price type codes (2 digits)
    #
    # 00 : limit
    # 03 : market
    # 05 : conditional limit
    # 06 : best limit (최유리지정가)
    # 07 : priority limit (최우선지정가)
    # 10 : limit IOC
    # 13 : market IOC
    # 16 : best limit IOC
    # 20 : limit FOK
    # 23 : market FOK
    # 26 : best limit FOK
    # 61 : pre-open off-hours single price
    # 81 : after-close off-hours closing price
    # 62 : off-hours single price
    #
    # -Buy/sell type codes (1 digit)
    # 1 : new buy
    # 2 : new sell
    # 3 : cancel buy
    # 4 : cancel sell
    # 5 : amend buy
    # 6 : amend sell
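    # Hedged illustration of how the codes above are combined in a call
    # (all values are placeholders, not a live order):
    #   self.KiwoomSendOrder('B_example', '9001', self.sAccount, 1,
    #                        '005930', 10, 70000, '00', '')
    #   -> new buy (nOrderType=1) of 10 shares at a 70,000 limit price ('00')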
def KiwoomSetRealReg(self, sScreenNo, sCode, sRealType='0'):
"""
OpenAPI 메뉴얼 참조
:param sScreenNo:
:param sCode:
:param sRealType:
:return:
"""
# print("CTrade : KiwoomSetRealReg")
ret = self.kiwoom.dynamicCall('SetRealReg(QString, QString, QString, QString)', sScreenNo, sCode, '9001;10',
sRealType)
return ret
def KiwoomSetRealRemove(self, sScreenNo, sCode):
"""
OpenAPI 메뉴얼 참조
:param sScreenNo:
:param sCode:
:return:
"""
# print("CTrade : KiwoomSetRealRemove")
ret = self.kiwoom.dynamicCall('SetRealRemove(QString, QString)', sScreenNo, sCode)
return ret
def OnEventConnect(self, nErrCode):
"""
OpenAPI 메뉴얼 참조
:param nErrCode:
:return:
"""
# print("CTrade : OnEventConnect")
        logger.debug('OnEventConnect %s' % nErrCode)
def OnReceiveMsg(self, sScrNo, sRQName, sTRCode, sMsg):
"""
OpenAPI 메뉴얼 참조
:param sScrNo:
:param sRQName:
:param sTRCode:
:param sMsg:
:return:
"""
# print("CTrade : OnReceiveMsg")
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTRCode, sMsg))
# self.InquiryLoop.exit()
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
"""
OpenAPI 메뉴얼 참조
:param sScrNo:
:param sRQName:
:param sTRCode:
:param sRecordName:
:param sPreNext:
:param nDataLength:
:param sErrorCode:
:param sMessage:
:param sSPlmMsg:
:return:
"""
# print('CTrade : OnReceiveTrData')
try:
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo[:4]):
return
if 'B_' in sRQName or 'S_' in sRQName:
주문번호 = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, 0, "주문번호")
# logger.debug("화면번호: %s sRQName : %s 주문번호: %s" % (sScrNo, sRQName, 주문번호))
self.주문등록(sRQName, 주문번호)
if sRQName == "d+2예수금요청":
data = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)',sTRCode, "", sRQName, 0, "d+2추정예수금")
# 입력된 문자열에 대해 lstrip 메서드를 통해 문자열 왼쪽에 존재하는 '-' 또는 '0'을 제거. 그리고 format 함수를 통해 천의 자리마다 콤마를 추가한 문자열로 변경
strip_data = data.lstrip('-0')
if strip_data == '':
strip_data = '0'
format_data = format(int(strip_data), ',d')
if data.startswith('-'):
format_data = '-' + format_data
self.sAsset = format_data
self.depositLoop.exit() # self.d2_deposit를 로봇에서 바로 쓸 수 있도록하기 위해서 예수금을 받고나서 루프해제시킴
if sRQName == "계좌평가잔고내역요청":
print("계좌평가잔고내역요청_수신")
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
self.CList = []
for i in range(0, cnt):
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, '종목번호').strip().lstrip('0')
# print(S)
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
S = self.종목코드변환(S) # 종목코드 맨 첫 'A'를 삭제하기 위함
self.CList.append(S)
# logger.debug("%s" % row)
if sPreNext == '2':
self.remained_data = True
self.InquiryList(_repeat=2)
else:
self.remained_data = False
print(self.CList)
self.InquiryLoop.exit()
if sRQName == "일자별종목별실현손익요청":
try:
data_idx = ['종목명', '체결량', '매입단가', '체결가', '당일매도손익', '손익율', '당일매매수수료', '당일매매세금']
result = []
for idx in data_idx:
data = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode,
"",
sRQName, 0, idx)
result.append(data.strip())
self.DailyProfitUpload(result)
except Exception as e:
print(e)
logger.error('일자별종목별실현손익요청 Error : %s' % e)
except Exception as e:
print('CTrade_OnReceiveTrData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveTrData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveTrData Error : %s' % e)
def OnReceiveChejanData(self, sGubun, nItemCnt, sFidList):
"""
OpenAPI 메뉴얼 참조
:param sGubun:
:param nItemCnt:
:param sFidList:
:return:
"""
# logger.debug('OnReceiveChejanData [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
# 주문체결시 순서
# 1 구분:0 GetChejanData(913) = '접수'
# 2 구분:0 GetChejanData(913) = '체결'
# 3 구분:1 잔고정보
"""
# sFid별 주요데이터는 다음과 같습니다.
# "9201" : "계좌번호"
# "9203" : "주문번호"
# "9001" : "종목코드"
# "913" : "주문상태"
# "302" : "종목명"
# "900" : "주문수량"
# "901" : "주문가격"
# "902" : "미체결수량"
# "903" : "체결누계금액"
# "904" : "원주문번호"
# "905" : "주문구분"
# "906" : "매매구분"
# "907" : "매도수구분"
# "908" : "주문/체결시간"
# "909" : "체결번호"
# "910" : "체결가"
# "911" : "체결량"
# "10" : "현재가"
# "27" : "(최우선)매도호가"
# "28" : "(최우선)매수호가"
# "914" : "단위체결가"
# "915" : "단위체결량"
# "919" : "거부사유"
# "920" : "화면번호"
# "917" : "신용구분"
# "916" : "대출일"
# "930" : "보유수량"
# "931" : "매입단가"
# "932" : "총매입가"
# "933" : "주문가능수량"
# "945" : "당일순매수수량"
# "946" : "매도/매수구분"
# "950" : "당일총매도손일"
# "951" : "예수금"
# "307" : "기준가"
# "8019" : "손익율"
# "957" : "신용금액"
# "958" : "신용이자"
# "918" : "만기일"
# "990" : "당일실현손익(유가)"
# "991" : "당일실현손익률(유가)"
# "992" : "당일실현손익(신용)"
# "993" : "당일실현손익률(신용)"
# "397" : "파생상품거래단위"
# "305" : "상한가"
# "306" : "하한가"
"""
# print("CTrade : OnReceiveChejanData")
try:
# 접수
if sGubun == "0":
# logger.debug('OnReceiveChejanData: 접수 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
화면번호 = self.kiwoom.dynamicCall('GetChejanData(QString)', 920)
if len(화면번호.replace(' ','')) == 0 : # 로봇 실행중 영웅문으로 주문 발생 시 화면번호가 ' '로 들어와 에러발생함 방지
print('다른 프로그램을 통한 거래 발생')
Telegram('다른 프로그램을 통한 거래 발생', send='mc')
logger.info('다른 프로그램을 통한 거래 발생')
return
elif self.sScreenNo != int(화면번호[:4]):
return
param = dict()
param['sGubun'] = sGubun
param['계좌번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9201)
param['주문번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9203)
param['종목코드'] = self.종목코드변환(self.kiwoom.dynamicCall('GetChejanData(QString)', 9001))
param['주문업무분류'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 912)
# 접수 / 체결 확인
# 주문상태(10:원주문, 11:정정주문, 12:취소주문, 20:주문확인, 21:정정확인, 22:취소확인, 90-92:주문거부)
param['주문상태'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 913) # 접수 or 체결 확인
param['종목명'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 302).strip()
param['주문수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 900)
param['주문가격'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 901)
param['미체결수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 902)
param['체결누계금액'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 903)
param['원주문번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 904)
param['주문구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 905)
param['매매구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 906)
param['매도수구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 907)
param['체결시간'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 908)
param['체결번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 909)
param['체결가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 910)
param['체결량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 911)
param['현재가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 10)
param['매도호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 27)
param['매수호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 28)
param['단위체결가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 914).strip()
param['단위체결량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 915)
param['화면번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 920)
param['당일매매수수료'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 938)
param['당일매매세금'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 939)
param['체결수량'] = int(param['주문수량']) - int(param['미체결수량'])
logger.debug('접수 - 주문상태:{주문상태} 계좌번호:{계좌번호} 체결시간:{체결시간} 주문번호:{주문번호} 체결번호:{체결번호} 종목코드:{종목코드} 종목명:{종목명} 체결량:{체결량} 체결가:{체결가} 단위체결가:{단위체결가} 주문수량:{주문수량} 체결수량:{체결수량} 단위체결량:{단위체결량} 미체결수량:{미체결수량} 당일매매수수료:{당일매매수수료} 당일매매세금:{당일매매세금}'.format(**param))
# if param["주문상태"] == "접수":
# self.접수처리(param)
# if param["주문상태"] == "체결": # 매도의 경우 체결로 안들어옴
# self.체결처리(param)
self.체결처리(param)
# 잔고통보
if sGubun == "1":
# logger.debug('OnReceiveChejanData: 잔고통보 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
param = dict()
param['sGubun'] = sGubun
param['계좌번호'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 9201)
param['종목코드'] = self.종목코드변환(self.kiwoom.dynamicCall('GetChejanData(QString)', 9001))
param['신용구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 917)
param['대출일'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 916)
param['종목명'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 302).strip()
param['현재가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 10)
param['보유수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 930)
param['매입단가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 931)
param['총매입가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 932)
param['주문가능수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 933)
param['당일순매수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 945)
param['매도매수구분'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 946)
param['당일총매도손익'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 950)
param['예수금'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 951)
param['매도호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 27)
param['매수호가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 28)
param['기준가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 307)
param['손익율'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 8019)
param['신용금액'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 957)
param['신용이자'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 958)
param['만기일'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 918)
param['당일실현손익_유가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 990)
param['당일실현손익률_유가'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 991)
param['당일실현손익_신용'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 992)
param['당일실현손익률_신용'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 993)
param['담보대출수량'] = self.kiwoom.dynamicCall('GetChejanData(QString)', 959)
logger.debug('잔고통보 - 계좌번호:{계좌번호} 종목명:{종목명} 보유수량:{보유수량} 매입단가:{매입단가} 총매입가:{총매입가} 손익율:{손익율} 당일총매도손익:{당일총매도손익} 당일순매수량:{당일순매수량}'.format(**param))
self.잔고처리(param)
# 특이신호
if sGubun == "3":
logger.debug('OnReceiveChejanData: 특이신호 [%s] [%s] [%s]' % (sGubun, nItemCnt, sFidList))
pass
except Exception as e:
print('CTrade_OnReceiveChejanData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveChejanData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveChejanData Error : %s' % e)
def OnReceiveRealData(self, sRealKey, sRealType, sRealData):
"""
OpenAPI 메뉴얼 참조
:param sRealKey:
:param sRealType:
:param sRealData:
:return:
"""
# logger.debug('OnReceiveRealData [%s] [%s] [%s]' % (sRealKey, sRealType, sRealData))
_now = datetime.datetime.now()
try:
if _now.strftime('%H:%M:%S') < '09:00:00': # 9시 이전 데이터 버림(장 시작 전에 테이터 들어오는 것도 많으므로 버리기 위함)
return
if sRealKey not in self.실시간종목리스트: # 리스트에 없는 데이터 버림
return
if sRealType == "주식시세" or sRealType == "주식체결":
param = dict()
param['종목코드'] = self.종목코드변환(sRealKey)
param['체결시간'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 20).strip()
param['현재가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 10).strip()
param['전일대비'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 11).strip()
param['등락률'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 12).strip()
param['매도호가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 27).strip()
param['매수호가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 28).strip()
param['누적거래량'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 13).strip()
param['시가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 16).strip()
param['고가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 17).strip()
param['저가'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 18).strip()
param['거래회전율'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 31).strip()
param['시가총액'] = self.kiwoom.dynamicCall("GetCommRealData(QString, int)", sRealType, 311).strip()
self.실시간데이터처리(param)
except Exception as e:
print('CTrade_OnReceiveRealData Error ', e)
Telegram('[StockTrader]CTrade_OnReceiveRealData Error : %s' % e, send='mc')
logger.error('CTrade_OnReceiveRealData Error : %s' % e)
def OnReceiveTrCondition(self, sScrNo, strCodeList, strConditionName, nIndex, nNext):
print('OnReceiveTrCondition')
try:
if strCodeList == "":
self.ConditionLoop.exit()
return []
self.codeList = strCodeList.split(';')
del self.codeList[-1]
# print(self.codeList)
logger.info("[%s]조건 검색 완료"%(self.sName))
self.ConditionLoop.exit()
print('OnReceiveTrCondition :', self.codeList)
return self.codeList
except Exception as e:
print("OnReceiveTrCondition_Error")
print(e)
def OnReceiveConditionVer(self, lRet, sMsg):
print('OnReceiveConditionVer')
try:
self.condition = self.getConditionNameList()
except Exception as e:
print("CTrade : OnReceiveConditionVer_Error")
finally:
self.ConditionLoop.exit()
def OnReceiveRealCondition(self, sTrCode, strType, strConditionName, strConditionIndex):
# print("CTrade : OnReceiveRealCondition")
# OpenAPI 메뉴얼 참조
# :param sTrCode:
# :param strType:
# :param strConditionName:
# :param strConditionIndex:
# :return:
_now = datetime.datetime.now().strftime('%H:%M:%S')
if (_now >= '10:00:00' and _now < '13:00:00') or _now >= '15:17:00': # 10시부터 13시 이전 데이터 버림, 15시 17분 당일 매도 처리 후 데이터 버림
return
# logger.info('OnReceiveRealCondition [%s] [%s] [%s] [%s]' % (sTrCode, strType, strConditionName, strConditionIndex))
print("실시간조검검색_종목코드: %s %s / Time : %s"%(sTrCode, "종목편입" if strType == "I" else "종목이탈", _now))
if strType == 'I':
self.실시간조건처리(sTrCode)
    def 종목코드변환(self, code):  # strip the leading 'A' from codes received via TR requests
return code.replace('A', '')
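# e.g. 'A005930' -> '005930' (illustrative code value, not from the original source)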
def 정량매수(self, sRQName, 종목코드, 매수가, 수량):
# sRQName = '정량매수%s' % self.sScreenNo
sScreenNo = self.GenScreenNO() # generate a new screen number for every order
sAccNo = self.sAccount
nOrderType = 1 # (1: new buy, 2: new sell, 3: cancel buy, 4: cancel sell, 5: amend buy, 6: amend sell)
sCode = 종목코드
nQty = 수량
nPrice = 매수가
sHogaGb = self.매수방법 # 00: limit, 03: market, 05: conditional limit, 06: best limit, 07: top-priority limit, 10: limit IOC, 13: market IOC, 16: best-limit IOC, 20: limit FOK, 23: market FOK, 26: best-limit FOK, 61: pre-open off-hours, 62: after-hours single price, 81: after-hours at close
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo)
return ret
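# Usage sketch (added; values purely illustrative): a fixed-quantity limit buy of 10 shares
# at 70,000 would be self.정량매수('정량매수0001', '005930', 70000, 10); when self.매수방법
# is '03' (market order) the price argument is ignored because nPrice is forced to 0 above.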
def 정액매수(self, sRQName, 종목코드, 매수가, 매수금액):
# sRQName = '정액매수%s' % self.sScreenNo
try:
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 1 # (1: new buy, 2: new sell, 3: cancel buy, 4: cancel sell, 5: amend buy, 6: amend sell)
sCode = 종목코드
nQty = 매수금액 // 매수가
nPrice = 매수가
sHogaGb = self.매수방법 # 00: limit, 03: market, 05: conditional limit, 06: best limit, 07: top-priority limit, 10: limit IOC, 13: market IOC, 16: best-limit IOC, 20: limit FOK, 23: market FOK, 26: best-limit FOK, 61: pre-open off-hours, 62: after-hours single price, 81: after-hours at close
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
# logger.debug('주문 - %s %s %s %s %s %s %s %s %s', sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb, sOrgOrderNo)
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
except Exception as e:
print('CTrade_정액매수 Error ', e)
Telegram('[StockTrader]CTrade_정액매수 Error : %s' % e, send='mc')
logger.error('CTrade_정액매수 Error : %s' % e)
def 정량매도(self, sRQName, 종목코드, 매도가, 수량):
# sRQName = '정량매도%s' % self.sScreenNo
try:
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 2 # (1: new buy, 2: new sell, 3: cancel buy, 4: cancel sell, 5: amend buy, 6: amend sell)
sCode = 종목코드
nQty = 수량
nPrice = 매도가
sHogaGb = self.매도방법 # 00: limit, 03: market, 05: conditional limit, 06: best limit, 07: top-priority limit, 10: limit IOC, 13: market IOC, 16: best-limit IOC, 20: limit FOK, 23: market FOK, 26: best-limit FOK, 61: pre-open off-hours, 62: after-hours single price, 81: after-hours at close
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
except Exception as e:
print('[%s]정량매도 Error : %s' % (self.sName, e))
Telegram('[StockTrader][%s]정량매도 Error : %s' % (self.sName, e), send='mc')
logger.error('[%s]정량매도 Error : %s' % (self.sName, e))
def 정액매도(self, sRQName, 종목코드, 매도가, 수량):
# sRQName = '정액매도%s' % self.sScreenNo
sScreenNo = self.GenScreenNO()
sAccNo = self.sAccount
nOrderType = 2 # (1: new buy, 2: new sell, 3: cancel buy, 4: cancel sell, 5: amend buy, 6: amend sell)
sCode = 종목코드
nQty = 수량
nPrice = 매도가
sHogaGb = self.매도방법 # 00: limit, 03: market, 05: conditional limit, 06: best limit, 07: top-priority limit, 10: limit IOC, 13: market IOC, 16: best-limit IOC, 20: limit FOK, 23: market FOK, 26: best-limit FOK, 61: pre-open off-hours, 62: after-hours single price, 81: after-hours at close
if sHogaGb in ['03', '07', '06']:
nPrice = 0
sOrgOrderNo = 0
ret = self.parent.KiwoomSendOrder(sRQName, sScreenNo, sAccNo, nOrderType, sCode, nQty, nPrice, sHogaGb,
sOrgOrderNo)
return ret
def 주문등록(self, sRQName, 주문번호):
self.주문번호_주문_매핑[주문번호] = sRQName
Ui_계좌정보조회, QtBaseClass_계좌정보조회 = uic.loadUiType("./UI/계좌정보조회.ui")
class 화면_계좌정보(QDialog, Ui_계좌정보조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_계좌정보, self).__init__(parent) # Initialize하는 형식
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['종목번호', '종목명', '현재가', '보유수량', '매입가', '매입금액', '평가금액', '수익률(%)', '평가손익', '매매가능수량']
self.보이는컬럼 = ['종목번호', '종목명', '현재가', '보유수량', '매입가', '매입금액', '평가금액', '수익률(%)', '평가손익', '매매가능수량'] # 주당 손익 -> 수익률(%)
self.result = []
self.KiwoomAccount()
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def KiwoomAccount(self):
ACCOUNT_CNT = self.kiwoom.dynamicCall('GetLoginInfo("ACCOUNT_CNT")')
ACC_NO = self.kiwoom.dynamicCall('GetLoginInfo("ACCNO")')
self.account = ACC_NO.split(';')[0:-1] # account numbers come back ';'-separated (e.g., three accounts: 111;222;333)
self.comboBox.clear()
self.comboBox.addItems(self.account)
logger.debug("보유 계좌수: %s 계좌번호: %s [%s]" % (ACCOUNT_CNT, self.account[0], ACC_NO))
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (
sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if sRQName == "계좌평가잔고내역요청":
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
# print(j)
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "", sRQName, i, j).strip().lstrip('0')
# print(S)
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
# logger.debug("%s" % row)
if sPreNext == '2':
self.Request(_repeat=2)
else:
self.model.update(DataFrame(data=self.result, columns=self.보이는컬럼))
print(self.result)
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
계좌번호 = self.comboBox.currentText().strip()
logger.debug("계좌번호 %s" % 계좌번호)
# see TR opw00018 in KOA Studio SA
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "계좌번호", 계좌번호) # 8132495511
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "비밀번호입력매체구분", '00')
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "조회구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "계좌평가잔고내역요청", "opw00018", _repeat,'{:04d}'.format(self.sScreenNo))
# Inquiry button: in Qt Designer, selecting the inquiry button and opening the signal/slot editor (bottom right) shows its clicked signal wired to the Inquiry() slot
def inquiry(self):
self.result = []
self.Request(_repeat=0)
def robot_account(self):
global 로봇거래계좌번호
로봇거래계좌번호 = self.comboBox.currentText().strip()
# persist the selection with sqlite3
try:
with sqlite3.connect(DATABASE) as conn:
cursor = conn.cursor()
robot_account = pickle.dumps(로봇거래계좌번호, protocol=pickle.HIGHEST_PROTOCOL, fix_imports=True)
_robot_account = base64.encodebytes(robot_account)
cursor.execute("REPLACE into Setting(keyword, value) values (?, ?)",
['robotaccount', _robot_account])
conn.commit()
print("로봇 계좌 등록 완료")
except Exception as e:
print('robot_account', e)
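# --- Added sketch (not part of the original source): inverse of robot_account() above.
# Assumes the same module-level DATABASE path and the Setting(keyword, value) schema,
# and that sqlite3/pickle/base64 are already imported as used elsewhere in this module.
def load_robot_account():
    try:
        with sqlite3.connect(DATABASE) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT value FROM Setting WHERE keyword = ?", ['robotaccount'])
            row = cursor.fetchone()
        if row is None:
            return None
        # robot_account() stored base64-encoded pickle bytes, so reverse both steps
        return pickle.loads(base64.decodebytes(row[0]))
    except Exception as e:
        print('load_robot_account', e)
        return None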
Ui_일자별주가조회, QtBaseClass_일자별주가조회 = uic.loadUiType("./UI/일자별주가조회.ui")
class 화면_일별주가(QDialog, Ui_일자별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_일별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('일자별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['일자', '현재가', '거래량', '시가', '고가', '저가', '거래대금']
self.result = []
d = today
self.lineEdit_date.setText(str(d))
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
if self.sScreenNo != int(sScrNo):
return
if sRQName == "주식일봉차트조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and S[0] == '-':
S = '-' + S[1:].lstrip('0')
row.append(S)
self.result.append(row)
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
df['종목코드'] = self.종목코드
self.model.update(df[['종목코드'] + self.columns])
for i in range(len(self.columns)):
self.tableView.resizeColumnToContents(i)
def Request(self, _repeat=0):
self.종목코드 = self.lineEdit_code.text().strip()
기준일자 = self.lineEdit_date.text().strip().replace('-', '')
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "종목코드", self.종목코드)
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "기준일자", 기준일자)
ret = self.kiwoom.dynamicCall('SetInputValue(QString, QString)', "수정주가구분", '1')
ret = self.kiwoom.dynamicCall('CommRqData(QString, QString, int, QString)', "주식일봉차트조회", "OPT10081", _repeat,
'{:04d}'.format(self.sScreenNo))
def inquiry(self):
self.result = []
self.Request(_repeat=0)
Ui_분별주가조회, QtBaseClass_분별주가조회 = uic.loadUiType("./UI/분별주가조회.ui")
class 화면_분별주가(QDialog, Ui_분별주가조회):
def __init__(self, sScreenNo, kiwoom=None, parent=None):
super(화면_분별주가, self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setupUi(self)
self.setWindowTitle('분별 주가 조회')
self.sScreenNo = sScreenNo
self.kiwoom = kiwoom
self.parent = parent
self.model = PandasModel()
self.tableView.setModel(self.model)
self.columns = ['체결시간', '현재가', '시가', '고가', '저가', '거래량']
self.result = []
def KiwoomConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].connect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].connect(self.OnReceiveTrData)
def KiwoomDisConnect(self):
self.kiwoom.OnReceiveMsg[str, str, str, str].disconnect(self.OnReceiveMsg)
self.kiwoom.OnReceiveTrData[str, str, str, str, str, int, str, str, str].disconnect(self.OnReceiveTrData)
def OnReceiveMsg(self, sScrNo, sRQName, sTrCode, sMsg):
logger.debug('OnReceiveMsg [%s] [%s] [%s] [%s]' % (sScrNo, sRQName, sTrCode, sMsg))
def OnReceiveTrData(self, sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg):
# logger.debug('OnReceiveTrData [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] [%s] ' % (sScrNo, sRQName, sTRCode, sRecordName, sPreNext, nDataLength, sErrorCode, sMessage, sSPlmMsg))
print('화면_분별주가 : OnReceiveTrData')
if self.sScreenNo != int(sScrNo):
return
if sRQName == "주식분봉차트조회":
종목코드 = ''
cnt = self.kiwoom.dynamicCall('GetRepeatCnt(QString, QString)', sTRCode, sRQName)
for i in range(0, cnt):
row = []
for j in self.columns:
S = self.kiwoom.dynamicCall('CommGetData(QString, QString, QString, int, QString)', sTRCode, "",
sRQName, i, j).strip().lstrip('0')
if len(S) > 0 and (S[0] == '-' or S[0] == '+'):
S = S[1:].lstrip('0')
row.append(S)
self.result.append(row)
# df = DataFrame(data=self.result, columns=self.columns)
# df.to_csv('분봉.csv', encoding='euc-kr')
if sPreNext == '2':
QTimer.singleShot(주문지연, lambda: self.Request(_repeat=2))
else:
df = DataFrame(data=self.result, columns=self.columns)
from __future__ import absolute_import
import wx
import numpy as np
import pandas as pd
import copy
import platform
from collections import OrderedDict
# For log dec tool
from .common import CHAR, Error, pretty_num_short, Info
from .plotdata import PlotData
from pydatview.tools.damping import logDecFromDecay
from pydatview.tools.curve_fitting import model_fit, extract_key_miscnum, extract_key_num, MODELS, FITTERS, set_common_keys
TOOL_BORDER=15
# --------------------------------------------------------------------------------}
# --- Default class for tools
# --------------------------------------------------------------------------------{
class GUIToolPanel(wx.Panel):
def __init__(self, parent):
super(GUIToolPanel,self).__init__(parent)
self.parent = parent
def destroy(self,event=None):
self.parent.removeTools()
def getBtBitmap(self,par,label,Type=None,callback=None,bitmap=False):
if Type is not None:
label=CHAR[Type]+' '+label
bt=wx.Button(par,wx.ID_ANY, label, style=wx.BU_EXACTFIT)
#try:
# if bitmap is not None:
# bt.SetBitmapLabel(wx.ArtProvider.GetBitmap(bitmap)) #,size=(12,12)))
# else:
#except:
# pass
if callback is not None:
par.Bind(wx.EVT_BUTTON, callback, bt)
return bt
def getToggleBtBitmap(self,par,label,Type=None,callback=None,bitmap=False):
if Type is not None:
label=CHAR[Type]+' '+label
bt=wx.ToggleButton(par,wx.ID_ANY, label, style=wx.BU_EXACTFIT)
if callback is not None:
par.Bind(wx.EVT_TOGGLEBUTTON, callback, bt)
return bt
# --------------------------------------------------------------------------------}
# --- Log Dec
# --------------------------------------------------------------------------------{
class LogDecToolPanel(GUIToolPanel):
def __init__(self, parent):
super(LogDecToolPanel,self).__init__(parent)
btClose = self.getBtBitmap(self,'Close' ,'close' ,self.destroy )
btComp = self.getBtBitmap(self,'Compute','compute',self.onCompute)
self.lb = wx.StaticText( self, -1, ' ')
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(btClose ,0, flag = wx.LEFT|wx.CENTER,border = 1)
self.sizer.Add(btComp ,0, flag = wx.LEFT|wx.CENTER,border = 5)
self.sizer.Add(self.lb ,0, flag = wx.LEFT|wx.CENTER,border = 5)
self.SetSizer(self.sizer)
def onCompute(self,event=None):
if len(self.parent.plotData)!=1:
Error(self,'Log Dec tool only works with a single plot.')
return
pd =self.parent.plotData[0]
try:
logdec,DampingRatio,T,fn,fd,IPos,INeg,epos,eneg=logDecFromDecay(pd.y,pd.x)
lab='LogDec.: {:.4f} - Damping ratio: {:.4f} - F_n: {:.4f} - F_d: {:.4f} - T:{:.3f}'.format(logdec,DampingRatio,fn,fd,T)
self.lb.SetLabel(lab)
self.sizer.Layout()
ax=self.parent.fig.axes[0]
ax.plot(pd.x[IPos],pd.y[IPos],'o')
ax.plot(pd.x[INeg],pd.y[INeg],'o')
ax.plot(pd.x ,epos,'k--')
ax.plot(pd.x ,eneg,'k--')
self.parent.canvas.draw()
except:
self.lb.SetLabel('Failed. The signal needs to look like the decay of a first order system.')
#self.parent.load_and_draw(); # DATA HAS CHANGED
# --------------------------------------------------------------------------------}
# --- Outliers
# --------------------------------------------------------------------------------{
class OutlierToolPanel(GUIToolPanel):
"""
A quick and dirty solution to manipulate plotData
I need to think of a better way to do that
"""
def __init__(self, parent):
super(OutlierToolPanel,self).__init__(parent)
self.parent = parent # parent is GUIPlotPanel
# Setting default states to parent
if 'RemoveOutliers' not in self.parent.plotDataOptions.keys():
self.parent.plotDataOptions['RemoveOutliers']=False
if 'OutliersMedianDeviation' not in self.parent.plotDataOptions.keys():
self.parent.plotDataOptions['OutliersMedianDeviation']=5
btClose = self.getBtBitmap(self,'Close','close',self.destroy)
self.btComp = self.getToggleBtBitmap(self,'Apply','cloud',self.onToggleCompute)
lb1 = wx.StaticText(self, -1, 'Median deviation:')
# self.tMD = wx.TextCtrl(self, wx.ID_ANY,, size = (30,-1), style=wx.TE_PROCESS_ENTER)
self.tMD = wx.SpinCtrlDouble(self, value='11', size=wx.Size(60,-1))
self.tMD.SetValue(self.parent.plotDataOptions['OutliersMedianDeviation'])
self.tMD.SetRange(0.0, 1000)
self.tMD.SetIncrement(0.5)
self.lb = wx.StaticText( self, -1, '')
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(btClose ,0,flag = wx.LEFT|wx.CENTER,border = 1)
self.sizer.Add(self.btComp,0,flag = wx.LEFT|wx.CENTER,border = 5)
self.sizer.Add(lb1 ,0,flag = wx.LEFT|wx.CENTER,border = 5)
self.sizer.Add(self.tMD ,0,flag = wx.LEFT|wx.CENTER,border = 5)
self.sizer.Add(self.lb ,0,flag = wx.LEFT|wx.CENTER,border = 5)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_SPINCTRLDOUBLE, self.onMDChangeArrow, self.tMD)
self.Bind(wx.EVT_TEXT_ENTER, self.onMDChangeEnter, self.tMD)
if platform.system()=='Windows':
# See issue https://github.com/wxWidgets/Phoenix/issues/1762
self.spintxt = self.tMD.Children[0]
assert isinstance(self.spintxt, wx.TextCtrl)
self.spintxt.Bind(wx.EVT_CHAR_HOOK, self.onMDChangeChar)
self.onToggleCompute(init=True)
def destroy(self,event=None):
self.parent.plotDataOptions['RemoveOutliers']=False
super(OutlierToolPanel,self).destroy()
def onToggleCompute(self,event=None, init=False):
self.parent.plotDataOptions['OutliersMedianDeviation'] = float(self.tMD.Value)
if not init:
self.parent.plotDataOptions['RemoveOutliers']= not self.parent.plotDataOptions['RemoveOutliers']
if self.parent.plotDataOptions['RemoveOutliers']:
self.lb.SetLabel('Outliers are now removed on the fly. Click "Clear" to stop.')
self.btComp.SetLabel(CHAR['sun']+' Clear')
else:
self.lb.SetLabel('Click on "Apply" to remove outliers on the fly for all new plot.')
self.btComp.SetLabel(CHAR['cloud']+' Apply')
if not init:
self.parent.load_and_draw() # Data will change
def onMDChange(self, event=None):
#print(self.tMD.Value)
self.parent.plotDataOptions['OutliersMedianDeviation'] = float(self.tMD.Value)
if self.parent.plotDataOptions['RemoveOutliers']:
self.parent.load_and_draw() # Data will change
def onMDChangeArrow(self, event):
self.onMDChange()
event.Skip()
def onMDChangeEnter(self, event):
self.onMDChange()
event.Skip()
def onMDChangeChar(self, event):
event.Skip()
code = event.GetKeyCode()
if code in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:
#print(self.spintxt.Value)
self.tMD.SetValue(self.spintxt.Value)
self.onMDChangeEnter(event)
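# Note (added): this panel only toggles two entries in parent.plotDataOptions,
# 'RemoveOutliers' (bool) and 'OutliersMedianDeviation' (float); the actual filtering
# happens downstream in parent.load_and_draw(). A hypothetical caller could therefore
# enable the same behaviour programmatically with:
#   plotPanel.plotDataOptions['RemoveOutliers'] = True
#   plotPanel.plotDataOptions['OutliersMedianDeviation'] = 5.0
#   plotPanel.load_and_draw()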
# --------------------------------------------------------------------------------}
# --- Moving Average
# --------------------------------------------------------------------------------{
class FilterToolPanel(GUIToolPanel):
"""
Moving average/Filters
A quick and dirty solution to manipulate plotData
I need to think of a better way to do that
"""
def __init__(self, parent):
from pydatview.tools.signal import FILTERS
super(FilterToolPanel,self).__init__(parent)
self.parent = parent # parent is GUIPlotPanel
self._DEFAULT_FILTERS=FILTERS
# Setting default states to parent
if 'Filter' not in self.parent.plotDataOptions.keys():
self.parent.plotDataOptions['Filter']=None
self._filterApplied = type(self.parent.plotDataOptions['Filter'])==dict
btClose = self.getBtBitmap(self,'Close','close',self.destroy)
self.btClear = self.getBtBitmap(self, 'Clear Plot','sun' , self.onClear)
self.btComp = self.getToggleBtBitmap(self,'Apply','cloud',self.onToggleCompute)
self.btPlot = self.getBtBitmap(self, 'Plot' ,'chart' , self.onPlot)
lb1 = wx.StaticText(self, -1, 'Filter:')
self.cbFilters = wx.ComboBox(self, choices=[filt['name'] for filt in self._DEFAULT_FILTERS], style=wx.CB_READONLY)
self.lbParamName = wx.StaticText(self, -1, ' :')
self.cbFilters.SetSelection(0)
#self.tParam = wx.TextCtrl(self, wx.ID_ANY,, size = (30,-1), style=wx.TE_PROCESS_ENTER)
self.tParam = wx.SpinCtrlDouble(self, value='11', size=wx.Size(60,-1))
self.lbInfo = wx.StaticText( self, -1, '')
# --- Layout
btSizer = wx.FlexGridSizer(rows=3, cols=2, hgap=2, vgap=0)
btSizer.Add(btClose ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btClear ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btComp,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btPlot ,0,flag = wx.ALL|wx.EXPAND, border = 1)
#btSizer.Add(btHelp ,0,flag = wx.ALL|wx.EXPAND, border = 1)
horzSizer = wx.BoxSizer(wx.HORIZONTAL)
horzSizer.Add(lb1 ,0,flag = wx.LEFT|wx.CENTER,border = 5)
horzSizer.Add(self.cbFilters ,0,flag = wx.LEFT|wx.CENTER,border = 1)
horzSizer.Add(self.lbParamName ,0,flag = wx.LEFT|wx.CENTER,border = 5)
horzSizer.Add(self.tParam ,0,flag = wx.LEFT|wx.CENTER,border = 1)
vertSizer = wx.BoxSizer(wx.VERTICAL)
vertSizer.Add(self.lbInfo ,0, flag = wx.LEFT ,border = 5)
vertSizer.Add(horzSizer ,1, flag = wx.LEFT|wx.EXPAND,border = 1)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(btSizer ,0, flag = wx.LEFT ,border = 1)
self.sizer.Add(vertSizer ,1, flag = wx.EXPAND|wx.LEFT ,border = 1)
self.SetSizer(self.sizer)
# --- Events
self.cbFilters.Bind(wx.EVT_COMBOBOX, self.onSelectFilt)
self.Bind(wx.EVT_SPINCTRLDOUBLE, self.onParamChangeArrow, self.tParam)
self.Bind(wx.EVT_TEXT_ENTER, self.onParamChangeEnter, self.tParam)
if platform.system()=='Windows':
# See issue https://github.com/wxWidgets/Phoenix/issues/1762
self.spintxt = self.tParam.Children[0]
assert isinstance(self.spintxt, wx.TextCtrl)
self.spintxt.Bind(wx.EVT_CHAR_HOOK, self.onParamChangeChar)
self.onSelectFilt()
self.onToggleCompute(init=True)
def destroy(self,event=None):
self.parent.plotDataOptions['Filter']=None
super(FilterToolPanel,self).destroy()
def onSelectFilt(self, event=None):
""" Select the filter, but does not applied it to the plotData
parentFilt is unchanged
But if the parent already has
"""
iFilt = self.cbFilters.GetSelection()
filt = self._DEFAULT_FILTERS[iFilt]
self.lbParamName.SetLabel(filt['paramName']+':')
self.tParam.SetRange(filt['paramRange'][0], filt['paramRange'][1])
self.tParam.SetIncrement(filt['increment'])
parentFilt=self.parent.plotDataOptions['Filter']
# Value
if type(parentFilt)==dict and parentFilt['name']==filt['name']:
self.tParam.SetValue(parentFilt['param'])
else:
self.tParam.SetValue(filt['param'])
def onToggleCompute(self, event=None, init=False):
"""
apply Filter based on GUI Data
"""
parentFilt=self.parent.plotDataOptions['Filter']
if not init:
self._filterApplied = not self._filterApplied
if self._filterApplied:
self.parent.plotDataOptions['Filter'] =self._GUI2Filt()
#print('Apply', self.parent.plotDataOptions['Filter'])
self.lbInfo.SetLabel(
'Filter is now applied on the fly. Change parameter live. Click "Clear" to stop. '
)
self.btPlot.Enable(False)
self.btClear.Enable(False)
self.btComp.SetLabel(CHAR['sun']+' Clear')
else:
self.parent.plotDataOptions['Filter'] = None
self.lbInfo.SetLabel(
'Click on "Apply" to set filter on the fly for all plots. '+
'Click on "Plot" to try a filter on the current plot.'
)
self.btPlot.Enable(True)
self.btClear.Enable(True)
self.btComp.SetLabel(CHAR['cloud']+' Apply')
if not init:
self.parent.load_and_draw() # Data will change
pass
def _GUI2Filt(self):
iFilt = self.cbFilters.GetSelection()
filt = self._DEFAULT_FILTERS[iFilt].copy()
filt['param']=float(self.tParam.Value) # self.spintxt only exists on Windows and np.float is deprecated
return filt
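# Note (added): the filter dicts built here originate from pydatview.tools.signal.FILTERS;
# judging from onSelectFilt above they carry at least 'name', 'param', 'paramName',
# 'paramRange' and 'increment' keys, and applyFilter(x, y, filt) in onPlot below
# presumably consumes the 'name'/'param' pair.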
def onPlot(self, event=None):
"""
Overlay on current axis the filter
"""
from pydatview.tools.signal import applyFilter
if len(self.parent.plotData)!=1:
Error(self,'Plotting only works for a single plot. Plot less data.')
return
filt=self._GUI2Filt()
PD = self.parent.plotData[0]
y_filt = applyFilter(PD.x0, PD.y0, filt)
ax = self.parent.fig.axes[0]
PD_new = PlotData()
PD_new.fromXY(PD.x0, y_filt)
self.parent.transformPlotData(PD_new)
ax.plot(PD_new.x, PD_new.y, '-')
self.parent.canvas.draw()
def onClear(self, event):
self.parent.load_and_draw() # Data will change
def onParamChange(self, event=None):
if self._filterApplied:
self.parent.plotDataOptions['Filter'] =self._GUI2Filt()
#print('OnParamChange', self.parent.plotDataOptions['Filter'])
self.parent.load_and_draw() # Data will change
def onParamChangeArrow(self, event):
self.onParamChange()
event.Skip()
def onParamChangeEnter(self, event):
self.onParamChange()
event.Skip()
def onParamChangeChar(self, event):
event.Skip()
code = event.GetKeyCode()
if code in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]:
#print(self.spintxt.Value)
self.tParam.SetValue(self.spintxt.Value)
self.onParamChangeEnter(event)
# --------------------------------------------------------------------------------}
# --- Resample
# --------------------------------------------------------------------------------{
class ResampleToolPanel(GUIToolPanel):
def __init__(self, parent):
super(ResampleToolPanel,self).__init__(parent)
# --- Data from other modules
from pydatview.tools.signal import SAMPLERS
self.parent = parent # parent is GUIPlotPanel
self._SAMPLERS=SAMPLERS
# Setting default states to parent
if 'Sampler' not in self.parent.plotDataOptions.keys():
self.parent.plotDataOptions['Sampler']=None
self._applied = type(self.parent.plotDataOptions['Sampler'])==dict
# --- GUI elements
self.btClose = self.getBtBitmap(self, 'Close','close', self.destroy)
self.btAdd = self.getBtBitmap(self, 'Add','add' , self.onAdd)
self.btPlot = self.getBtBitmap(self, 'Plot' ,'chart' , self.onPlot)
self.btClear = self.getBtBitmap(self, 'Clear Plot','sun', self.onClear)
self.btApply = self.getToggleBtBitmap(self,'Apply','cloud',self.onToggleApply)
self.btHelp = self.getBtBitmap(self, 'Help','help', self.onHelp)
#self.lb = wx.StaticText( self, -1, """ Click help """)
self.cbTabs = wx.ComboBox(self, -1, choices=[], style=wx.CB_READONLY)
self.cbMethods = wx.ComboBox(self, -1, choices=[s['name'] for s in self._SAMPLERS], style=wx.CB_READONLY)
self.lbNewX = wx.StaticText(self, -1, 'New x: ')
self.textNewX = wx.TextCtrl(self, wx.ID_ANY, '', style = wx.TE_PROCESS_ENTER)
self.textOldX = wx.TextCtrl(self, wx.ID_ANY|wx.TE_READONLY)
self.textOldX.Enable(False)
# --- Layout
btSizer = wx.FlexGridSizer(rows=3, cols=2, hgap=2, vgap=0)
btSizer.Add(self.btClose , 0, flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btClear , 0, flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btAdd , 0, flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btPlot , 0, flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btHelp , 0, flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btApply , 0, flag = wx.ALL|wx.EXPAND, border = 1)
msizer = wx.FlexGridSizer(rows=2, cols=4, hgap=2, vgap=0)
msizer.Add(wx.StaticText(self, -1, 'Table:') , 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 1)
msizer.Add(self.cbTabs , 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 1)
msizer.Add(wx.StaticText(self, -1, 'Current x: '), 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 1)
msizer.Add(self.textOldX , 1, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND, 1)
msizer.Add(wx.StaticText(self, -1, 'Method:') , 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 1)
msizer.Add(self.cbMethods , 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 1)
msizer.Add(self.lbNewX , 0, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM, 1)
msizer.Add(self.textNewX , 1, wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND, 1)
msizer.AddGrowableCol(3,1)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(btSizer ,0, flag = wx.LEFT ,border = 5)
self.sizer.Add(msizer ,1, flag = wx.LEFT|wx.EXPAND ,border = TOOL_BORDER)
self.SetSizer(self.sizer)
# --- Events
self.cbTabs.Bind (wx.EVT_COMBOBOX, self.onTabChange)
self.cbMethods.Bind(wx.EVT_COMBOBOX, self.onMethodChange)
self.textNewX.Bind(wx.EVT_TEXT_ENTER,self.onParamChange)
# --- Init triggers
self.cbMethods.SetSelection(3)
self.onMethodChange(init=True)
self.onToggleApply(init=True)
self.updateTabList()
self.textNewX.SetValue('2')
def setCurrentX(self, x=None):
if x is None:
x= self.parent.plotData[0].x
if len(x)<50:
s=np.array2string(x, separator=', ')
else:
s =np.array2string(x[[0,1,2,3]], separator=', ')
s+=', ..., '
s+=np.array2string(x[[-3,-2,-1]], separator=', ')
s=s.replace('[','').replace(']','').replace(' ','').replace(',',', ')
self.textOldX.SetValue(s)
def onMethodChange(self, event=None, init=True):
""" Select the method, but does not applied it to the plotData
User data and option is unchanged
But if the user already has some options, they are used
"""
iOpt = self.cbMethods.GetSelection()
opt = self._SAMPLERS[iOpt]
self.lbNewX.SetLabel(opt['paramName']+':')
parentOpt=self.parent.plotDataOptions['Sampler']
# Value
if len(self.textNewX.Value)==0:
if type(parentOpt)==dict:
self.textNewX.SetValue(str(parentOpt['param'])[1:-1])
else:
self.textNewX.SetValue(str(opt['param'])[2:-2])
self.onParamChange()
def onParamChange(self, event=None):
if self._applied:
self.parent.plotDataOptions['Sampler'] =self._GUI2Data()
self.parent.load_and_draw() # Data will change
self.setCurrentX()
def _GUI2Data(self):
iOpt = self.cbMethods.GetSelection()
opt = self._SAMPLERS[iOpt].copy()
s= self.textNewX.Value.strip().replace('[','').replace(']','')
if len(s)>0:
if ',' in s:
opt['param']=np.array(s.split(',')).astype(float)
else:
opt['param']=np.array(s.split()).astype(float) # space-separated values
return opt
def onToggleApply(self, event=None, init=False):
"""
apply sampler based on GUI Data
"""
parentFilt=self.parent.plotDataOptions['Sampler']
if not init:
self._applied = not self._applied
if self._applied:
self.parent.plotDataOptions['Sampler'] =self._GUI2Data()
#print('Apply', self.parent.plotDataOptions['Sampler'])
#self.lbInfo.SetLabel(
# 'Sampler is now applied on the fly. Change parameter live. Click "Clear" to stop. '
# )
self.btPlot.Enable(False)
self.btClear.Enable(False)
self.btApply.SetLabel(CHAR['sun']+' Clear')
else:
self.parent.plotDataOptions['Sampler'] = None
#self.lbInfo.SetLabel(
# 'Click on "Apply" to set filter on the fly for all plots. '+
# 'Click on "Plot" to try a filter on the current plot.'
# )
self.btPlot.Enable(True)
self.btClear.Enable(True)
self.btApply.SetLabel(CHAR['cloud']+' Apply')
if not init:
self.parent.load_and_draw() # Data will change
self.setCurrentX()
def onAdd(self,event=None):
iSel = self.cbTabs.GetSelection()
tabList = self.parent.selPanel.tabList
mainframe = self.parent.mainframe
icol, colname = self.parent.selPanel.xCol
print(icol,colname)
opt = self._GUI2Data()
errors=[]
if iSel==0:
dfs, names, errors = tabList.applyResampling(icol, opt, bAdd=True)
mainframe.load_dfs(dfs,names,bAdd=True)
else:
df, name = tabList.get(iSel-1).applyResampling(icol, opt, bAdd=True)
mainframe.load_df(df,name,bAdd=True)
self.updateTabList()
if len(errors)>0:
raise Exception('Error: The resampling failed on some tables:\n\n'+'\n'.join(errors))
def onPlot(self,event=None):
from pydatview.tools.signal import applySampler
if len(self.parent.plotData)!=1:
Error(self,'Plotting only works for a single plot. Plot less data.')
return
opts=self._GUI2Data()
PD = self.parent.plotData[0]
x_new, y_new = applySampler(PD.x0, PD.y0, opts)
ax = self.parent.fig.axes[0]
PD_new = PlotData()
PD_new.fromXY(x_new, y_new)
self.parent.transformPlotData(PD_new)
ax.plot(PD_new.x, PD_new.y, '-')
self.setCurrentX(x_new)
self.parent.canvas.draw()
def onClear(self,event=None):
self.parent.load_and_draw() # Data will change
# Update Current X
self.setCurrentX()
# Update Table list
self.updateTabList()
def onTabChange(self,event=None):
#tabList = self.parent.selPanel.tabList
#iSel=self.cbTabs.GetSelection()
pass
def updateTabList(self,event=None):
tabList = self.parent.selPanel.tabList
tabListNames = ['All opened tables']+tabList.getDisplayTabNames()
try:
iSel=np.max([np.min([self.cbTabs.GetSelection(),len(tabListNames)]),0])
self.cbTabs.Clear()
[self.cbTabs.Append(tn) for tn in tabListNames]
self.cbTabs.SetSelection(iSel)
except RuntimeError:
pass
def onHelp(self,event=None):
Info(self,"""Resampling.
The resampling operation changes the "x" values of a table/plot and
adapt the "y" values accordingly.
To resample perform the following step:
- Chose a resampling method:
- replace: specify all the new x-values
- insert : insert a list of x values to the existing ones
- delete : delete a list of x values from the existing ones
- every-n : use every n values
- time-based: downsample using sample averaging or upsample using
linear interpolation, x-axis must already be in seconds
- delta x : specify a delta for uniform spacing of x values
- Specify the x values as a space or comma separated list
- Click on one of the following buttons:
- Plot: will display the resampled data on the figure
- Apply: will perform the resampling on the fly for all new plots
- Add: will create new table(s) with resampled values for all
signals. This process might take some time.
Select a table or choose all (default)
""")
# --------------------------------------------------------------------------------}
# --- Mask
# --------------------------------------------------------------------------------{
class MaskToolPanel(GUIToolPanel):
def __init__(self, parent):
super(MaskToolPanel,self).__init__(parent)
tabList = self.parent.selPanel.tabList
tabListNames = ['All opened tables']+tabList.getDisplayTabNames()
allMask = tabList.commonMaskString
if len(allMask)==0:
allMask=self.guessMask(tabList) # no known mask, we guess one to help the user
self.applied=False
else:
self.applied=True
btClose = self.getBtBitmap(self, 'Close','close', self.destroy)
btComp = self.getBtBitmap(self, u'Mask (add)','add' , self.onApply)
if self.applied:
self.btCompMask = self.getToggleBtBitmap(self, 'Clear','sun', self.onToggleApplyMask)
self.btCompMask.SetValue(True)
else:
self.btCompMask = self.getToggleBtBitmap(self, 'Mask','cloud', self.onToggleApplyMask)
self.lb = wx.StaticText( self, -1, """(Example of mask: "({Time}>100) && ({Time}<50) && ({WS}==5)" or "{Date} > '2018-10-01'")""")
self.cbTabs = wx.ComboBox(self, choices=tabListNames, style=wx.CB_READONLY)
self.cbTabs.SetSelection(0)
self.textMask = wx.TextCtrl(self, wx.ID_ANY, allMask)
#self.textMask.SetValue('({Time}>100) & ({Time}<400)')
#self.textMask.SetValue("{Date} > '2018-10-01'")
btSizer = wx.FlexGridSizer(rows=2, cols=2, hgap=2, vgap=0)
btSizer.Add(btClose ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(wx.StaticText(self, -1, '') ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(btComp ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(self.btCompMask ,0,flag = wx.ALL|wx.EXPAND, border = 1)
row_sizer = wx.BoxSizer(wx.HORIZONTAL)
row_sizer.Add(wx.StaticText(self, -1, 'Tab:') , 0, wx.CENTER|wx.LEFT, 0)
row_sizer.Add(self.cbTabs , 0, wx.CENTER|wx.LEFT, 2)
row_sizer.Add(wx.StaticText(self, -1, 'Mask:'), 0, wx.CENTER|wx.LEFT, 5)
row_sizer.Add(self.textMask, 1, wx.CENTER|wx.LEFT|wx.EXPAND, 5)
vert_sizer = wx.BoxSizer(wx.VERTICAL)
vert_sizer.Add(self.lb ,0, flag = wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border = 5)
vert_sizer.Add(row_sizer ,1, flag = wx.EXPAND|wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border = 5)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(btSizer ,0, flag = wx.LEFT ,border = 5)
self.sizer.Add(vert_sizer ,1, flag = wx.LEFT|wx.EXPAND ,border = TOOL_BORDER)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_COMBOBOX, self.onTabChange, self.cbTabs )
def onTabChange(self,event=None):
tabList = self.parent.selPanel.tabList
iSel=self.cbTabs.GetSelection()
if iSel==0:
maskString = tabList.commonMaskString
else:
maskString= tabList.get(iSel-1).maskString
if len(maskString)>0:
self.textMask.SetValue(maskString)
#else:
# self.textMask.SetValue('') # no known mask
# self.textMask.SetValue(self.guessMask) # no known mask
def guessMask(self,tabList):
cols=[c.lower() for c in tabList.get(0).columns_clean]
if 'time' in cols:
return '{Time} > 100'
elif 'date' in cols:
return "{Date} > '2017-01-01"
else:
return ''
def onClear(self,event=None):
iSel = self.cbTabs.GetSelection()
tabList = self.parent.selPanel.tabList
mainframe = self.parent.mainframe
if iSel==0:
tabList.clearCommonMask()
else:
tabList.get(iSel-1).clearMask()
mainframe.redraw()
self.onTabChange()
def onToggleApplyMask(self,event=None):
self.applied = not self.applied
if self.applied:
self.btCompMask.SetLabel(CHAR['sun']+' Clear')
else:
self.btCompMask.SetLabel(CHAR['cloud']+' Mask')
if self.applied:
self.onApply(event,bAdd=False)
else:
self.onClear()
def onApply(self,event=None,bAdd=True):
maskString = self.textMask.GetLineText(0)
iSel = self.cbTabs.GetSelection()
tabList = self.parent.selPanel.tabList
mainframe = self.parent.mainframe
if iSel==0:
dfs, names, errors = tabList.applyCommonMaskString(maskString, bAdd=bAdd)
if bAdd:
mainframe.load_dfs(dfs,names,bAdd=bAdd)
else:
mainframe.redraw()
if len(errors)>0:
raise Exception('Error: The mask failed on some tables:\n\n'+'\n'.join(errors))
else:
df, name = tabList.get(iSel-1).applyMaskString(maskString, bAdd=bAdd)
if bAdd:
mainframe.load_df(df,name,bAdd=bAdd)
else:
mainframe.redraw()
self.updateTabList()
def updateTabList(self,event=None):
tabList = self.parent.selPanel.tabList
tabListNames = ['All opened tables']+tabList.getDisplayTabNames()
try:
iSel=np.min([self.cbTabs.GetSelection(),len(tabListNames)])
self.cbTabs.Clear()
[self.cbTabs.Append(tn) for tn in tabListNames]
self.cbTabs.SetSelection(iSel)
except RuntimeError:
pass
# --------------------------------------------------------------------------------}
# --- Radial
# --------------------------------------------------------------------------------{
sAVG_METHODS = ['Last `n` seconds','Last `n` periods']
AVG_METHODS = ['constantwindow','periods']
class RadialToolPanel(GUIToolPanel):
def __init__(self, parent):
super(RadialToolPanel,self).__init__(parent)
tabList = self.parent.selPanel.tabList
tabListNames = ['All opened tables']+tabList.getDisplayTabNames()
btClose = self.getBtBitmap(self,'Close' ,'close' , self.destroy)
btComp = self.getBtBitmap(self,'Average','compute', self.onApply) # ART_PLUS
self.lb = wx.StaticText( self, -1, """Select tables, averaging method and average parameter (`Period` methods uses the `azimuth` signal) """)
self.cbTabs = wx.ComboBox(self, choices=tabListNames, style=wx.CB_READONLY)
self.cbMethod = wx.ComboBox(self, choices=sAVG_METHODS, style=wx.CB_READONLY)
self.cbTabs.SetSelection(0)
self.cbMethod.SetSelection(0)
self.textAverageParam = wx.TextCtrl(self, wx.ID_ANY, '2',size = (36,-1), style=wx.TE_PROCESS_ENTER)
btSizer = wx.FlexGridSizer(rows=2, cols=1, hgap=0, vgap=0)
#btSizer = wx.BoxSizer(wx.VERTICAL)
btSizer.Add(btClose ,0, flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(btComp ,0, flag = wx.ALL|wx.EXPAND, border = 1)
row_sizer = wx.BoxSizer(wx.HORIZONTAL)
row_sizer.Add(wx.StaticText(self, -1, 'Tab:') , 0, wx.CENTER|wx.LEFT, 0)
row_sizer.Add(self.cbTabs , 0, wx.CENTER|wx.LEFT, 2)
row_sizer.Add(wx.StaticText(self, -1, 'Method:'), 0, wx.CENTER|wx.LEFT, 5)
row_sizer.Add(self.cbMethod , 0, wx.CENTER|wx.LEFT, 2)
row_sizer.Add(wx.StaticText(self, -1, 'Param:') , 0, wx.CENTER|wx.LEFT, 5)
row_sizer.Add(self.textAverageParam , 0, wx.CENTER|wx.LEFT|wx.RIGHT| wx.EXPAND, 2)
vert_sizer = wx.BoxSizer(wx.VERTICAL)
vert_sizer.Add(self.lb ,0, flag =wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM,border = 5)
vert_sizer.Add(row_sizer ,0, flag =wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM,border = 5)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(btSizer ,0, flag = wx.LEFT ,border = 5)
self.sizer.Add(vert_sizer ,0, flag = wx.LEFT|wx.EXPAND,border = TOOL_BORDER)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_COMBOBOX, self.onTabChange, self.cbTabs )
def onTabChange(self,event=None):
tabList = self.parent.selPanel.tabList
def onApply(self,event=None):
try:
avgParam = float(self.textAverageParam.GetLineText(0))
except:
raise Exception('Error: the averaging parameter needs to be an integer or a float')
iSel = self.cbTabs.GetSelection()
avgMethod = AVG_METHODS[self.cbMethod.GetSelection()]
tabList = self.parent.selPanel.tabList
mainframe = self.parent.mainframe
if iSel==0:
dfs, names, errors = tabList.radialAvg(avgMethod,avgParam)
mainframe.load_dfs(dfs,names,bAdd=True)
if len(errors)>0:
raise Exception('Error: The radial averaging failed on some tables:\n\n'+'\n'.join(errors))
else:
dfs, names = tabList.get(iSel-1).radialAvg(avgMethod,avgParam)
mainframe.load_dfs(dfs,names,bAdd=True)
self.updateTabList()
def updateTabList(self,event=None):
tabList = self.parent.selPanel.tabList
tabListNames = ['All opened tables']+tabList.getDisplayTabNames()
iSel=np.min([self.cbTabs.GetSelection(),len(tabListNames)])
self.cbTabs.Clear()
[self.cbTabs.Append(tn) for tn in tabListNames]
self.cbTabs.SetSelection(iSel)
# --------------------------------------------------------------------------------}
# --- Curve Fitting
# --------------------------------------------------------------------------------{
MODELS_EXAMPLE =[
{'label':'User defined model', 'id':'eval:',
'formula':'{a}*x**2 + {b}',
'coeffs':None,
'consts':None,
'bounds':None },
]
MODELS_EXTRA =[
# {'label':'Exponential decay', 'id':'eval:',
# 'formula':'{A}*exp(-{k}*x)+{B}',
# 'coeffs' :'k=1, A=1, B=0',
# 'consts' :None,
# 'bounds' :None},
]
class CurveFitToolPanel(GUIToolPanel):
def __init__(self, parent):
super(CurveFitToolPanel,self).__init__(parent)
# Data
self.x = None
self.y_fit = None
# GUI Objecst
btClose = self.getBtBitmap(self, 'Close','close', self.destroy)
btClear = self.getBtBitmap(self, 'Clear','sun', self.onClear) # DELETE
btAdd = self.getBtBitmap(self, 'Add','add' , self.onAdd)
btCompFit = self.getBtBitmap(self, 'Fit','check', self.onCurveFit)
btHelp = self.getBtBitmap(self, 'Help','help', self.onHelp)
boldFont = self.GetFont().Bold()
lbOutputs = wx.StaticText(self, -1, 'Outputs')
lbInputs = wx.StaticText(self, -1, 'Inputs ')
lbOutputs.SetFont(boldFont)
lbInputs.SetFont(boldFont)
self.textFormula = wx.TextCtrl(self, wx.ID_ANY, '')
self.textGuess = wx.TextCtrl(self, wx.ID_ANY, '')
self.textBounds = wx.TextCtrl(self, wx.ID_ANY, '')
self.textConstants = wx.TextCtrl(self, wx.ID_ANY, '')
self.textFormulaNum = wx.TextCtrl(self, wx.ID_ANY, '', style=wx.TE_READONLY)
self.textCoeffs = wx.TextCtrl(self, wx.ID_ANY, '', style=wx.TE_READONLY)
self.textInfo = wx.TextCtrl(self, wx.ID_ANY, '', style=wx.TE_READONLY)
self.Models=copy.deepcopy(MODELS_EXAMPLE) + copy.deepcopy(FITTERS) + copy.deepcopy(MODELS) + copy.deepcopy(MODELS_EXTRA)
sModels=[d['label'] for d in self.Models]
self.cbModels = wx.ComboBox(self, choices=sModels, style=wx.CB_READONLY)
self.cbModels.SetSelection(0)
btSizer = wx.FlexGridSizer(rows=3, cols=2, hgap=2, vgap=0)
btSizer.Add(btClose ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(btClear ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(btAdd ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(btCompFit ,0,flag = wx.ALL|wx.EXPAND, border = 1)
btSizer.Add(btHelp ,0,flag = wx.ALL|wx.EXPAND, border = 1)
inputSizer = wx.FlexGridSizer(rows=5, cols=2, hgap=0, vgap=0)
inputSizer.Add(lbInputs ,0, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
inputSizer.Add(self.cbModels ,1, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,border = 1)
inputSizer.Add(wx.StaticText(self, -1, 'Formula:') ,0, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
inputSizer.Add(self.textFormula ,1, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,border = 1)
inputSizer.Add(wx.StaticText(self, -1, 'Guess:') ,0, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
inputSizer.Add(self.textGuess ,1, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,border = 1)
inputSizer.Add(wx.StaticText(self, -1, 'Bounds:') ,0, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
inputSizer.Add(self.textBounds ,1, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,border = 1)
inputSizer.Add(wx.StaticText(self, -1, 'Constants:'),0, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
inputSizer.Add(self.textConstants ,1, flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,border = 1)
inputSizer.AddGrowableCol(1,1)
outputSizer = wx.FlexGridSizer(rows=5, cols=2, hgap=0, vgap=0)
outputSizer.Add(lbOutputs ,0 , flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
outputSizer.Add(wx.StaticText(self, -1, '') ,0 , flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
outputSizer.Add(wx.StaticText(self, -1, 'Formula:'),0 , flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
outputSizer.Add(self.textFormulaNum ,1 , flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,border = 1)
outputSizer.Add(wx.StaticText(self, -1, 'Parameters:') ,0 , flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
outputSizer.Add(self.textCoeffs ,1 , flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,border = 1)
outputSizer.Add(wx.StaticText(self, -1, 'Accuracy:') ,0 , flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM,border = 1)
outputSizer.Add(self.textInfo ,1 , flag=wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL|wx.TOP|wx.BOTTOM|wx.EXPAND,border = 1)
outputSizer.AddGrowableCol(1,0.5)
horzSizer = wx.BoxSizer(wx.HORIZONTAL)
horzSizer.Add(inputSizer ,1.0, flag = wx.LEFT|wx.EXPAND,border = 2)
horzSizer.Add(outputSizer ,1.0, flag = wx.LEFT|wx.EXPAND,border = 9)
vertSizer = wx.BoxSizer(wx.VERTICAL)
# vertSizer.Add(self.lbHelp ,0, flag = wx.LEFT ,border = 1)
vertSizer.Add(horzSizer ,1, flag = wx.LEFT|wx.EXPAND,border = 1)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(btSizer ,0, flag = wx.LEFT ,border = 1)
# self.sizer.Add(vertSizerCB ,0, flag = wx.LEFT ,border = 1)
self.sizer.Add(vertSizer ,1, flag = wx.EXPAND|wx.LEFT ,border = 1)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_COMBOBOX, self.onModelChange, self.cbModels)
self.onModelChange()
def onModelChange(self,event=None):
iModel = self.cbModels.GetSelection()
d = self.Models[iModel]
self.textFormula.SetEditable(True)
if d['id'].find('fitter:')==0 :
self.textGuess.Enable(False)
self.textGuess.SetValue('')
self.textFormula.Enable(False)
self.textFormula.SetValue(d['formula'])
self.textBounds.Enable(False)
self.textBounds.SetValue('')
self.textConstants.Enable(True)
# NOTE: conversion to string works with list, and tuples, not numpy array
val = ', '.join([k+'='+str(v) for k,v in d['consts'].items()])
self.textConstants.SetValue(val)
else:
# Formula
if d['id'].find('eval:')==0 :
self.textFormula.Enable(True)
self.textFormula.SetEditable(True)
else:
#self.textFormula.Enable(False)
self.textFormula.Enable(True)
self.textFormula.SetEditable(False)
self.textFormula.SetValue(d['formula'])
# Guess
if d['coeffs'] is None:
self.textGuess.SetValue('')
else:
self.textGuess.SetValue(d['coeffs'])
# Constants
if d['consts'] is None or len(d['consts'].strip())==0:
self.textConstants.Enable(False)
self.textConstants.SetValue('')
else:
self.textConstants.Enable(True)
self.textConstants.SetValue(d['consts'])
# Bounds
self.textBounds.Enable(True)
if d['bounds'] is None or len(d['bounds'].strip())==0:
self.textBounds.SetValue('all=(-np.inf, np.inf)')
else:
self.textBounds.SetValue(d['bounds'])
# Outputs
self.textFormulaNum.SetValue('(Click on Fit)')
self.textCoeffs.SetValue('')
self.textInfo.SetValue('')
def onCurveFit(self,event=None):
self.x = None
self.y_fit = None
if len(self.parent.plotData)!=1:
Error(self,'Curve fitting tool only works with a single curve. Plot less data.')
return
PD =self.parent.plotData[0]
iModel = self.cbModels.GetSelection()
d = self.Models[iModel]
if d['id'].find('fitter:')==0 :
sFunc=d['id']
p0=None
bounds=None
fun_kwargs=extract_key_miscnum(self.textConstants.GetLineText(0).replace('np.inf','inf'))
else:
# Formula
sFunc=d['id']
if sFunc=='eval:':
sFunc+=self.textFormula.GetLineText(0)
# Bounds
bounds=self.textBounds.GetLineText(0).replace('np.inf','inf')
# Guess
p0=self.textGuess.GetLineText(0).replace('np.inf','inf')
fun_kwargs=extract_key_num(self.textConstants.GetLineText(0).replace('np.inf','inf'))
#print('>>> Model fit sFunc :',sFunc )
#print('>>> Model fit p0 :',p0 )
#print('>>> Model fit bounds:',bounds )
#print('>>> Model fit kwargs:',fun_kwargs)
# Performing fit
y_fit, pfit, fitter = model_fit(sFunc, PD.x, PD.y, p0=p0, bounds=bounds,**fun_kwargs)
formatter = lambda x: pretty_num_short(x, digits=3)
formula_num = fitter.formula_num(fmt=formatter)
# Update info
self.textFormulaNum.SetValue(formula_num)
self.textCoeffs.SetValue(', '.join(['{}={:s}'.format(k,formatter(v)) for k,v in fitter.model['coeffs'].items()]))
self.textInfo.SetValue('R2 = {:.3f} '.format(fitter.model['R2']))
# Saving
d['formula'] = self.textFormula.GetLineText(0)
d['bounds'] = self.textBounds.GetLineText(0).replace('np.inf','inf')
d['coeffs'] = self.textGuess.GetLineText(0).replace('np.inf','inf')
if d['id'].find('fitter:')==0 :
d['consts'], _ = set_common_keys(d['consts'],fun_kwargs)
else:
d['consts']= self.textConstants.GetLineText(0).replace('np.inf','inf')
# Plot
ax=self.parent.fig.axes[0]
ax.plot(PD.x,y_fit,'o', ms=4)
self.parent.canvas.draw()
self.x=PD.x
self.y_fit=y_fit
self.sx=PD.sx
self.sy=PD.sy
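# Hedged sketch (not in the original source): the same fit can be reproduced without the
# GUI via model_fit as called above; formula and guess strings follow the MODELS_EXAMPLE
# format, e.g.
#   y_fit, pfit, fitter = model_fit('eval:{a}*x**2 + {b}', PD.x, PD.y, p0='a=1, b=0')
#   print(fitter.model['coeffs'], fitter.model['R2'])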
def onClear(self,event=None):
self.parent.load_and_draw() # DATA HAS CHANGED
self.onModelChange()
def onAdd(self,event=None):
name='model_fit'
if self.x is not None and self.y_fit is not None:
df = pd.DataFrame({self.sx:self.x, self.sy:self.y_fit})
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
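# Note (added): tm.box_expected(obj, box) is used throughout the tests below to wrap the
# same underlying data in each container type (Index/Series/DataFrame) so that a single
# test body can assert identical arithmetic behaviour across boxes, e.g.
#   tdi = tm.box_expected(TimedeltaIndex(['1 day', '2 days']), box)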
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([
|
Timedelta(hours=3)
|
pandas.Timedelta
|
import sys
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import OneHotEncoder
sys.path.append('src')
from mappings import MAPPINGS
from models import KernelClassifier
# Define parameter names (AUs) and target label (EMOTIONS)
PARAM_NAMES = np.loadtxt('data/au_names_new.txt', dtype=str).tolist()
EMOTIONS = np.array(['anger', 'disgust', 'fear', 'happy', 'sadness', 'surprise'])
# One-hot encode target label
ohe = OneHotEncoder(categories='auto', sparse=False)
ohe.fit(EMOTIONS[:, None])
# Define analysis parameters
scores_all = []
# Loop across mappings (Darwin, Ekman, etc.)
for kernel in tqdm(['cosine', 'sigmoid', 'linear', 'euclidean', 'l1', 'l2']):
for beta in [1, 10, 100, 1000, 10000]:
for mapp_name, mapp in MAPPINGS.items():
# ktype = kernel type (infer from kernel name)
ktype = 'similarity' if kernel in ['cosine', 'sigmoid', 'linear'] else 'distance'
# Initialize model!
model = KernelClassifier(au_cfg=mapp, param_names=PARAM_NAMES, kernel=kernel, ktype=ktype,
binarize_X=False, normalization='softmax', beta=beta)
subs = [str(s).zfill(2) for s in range(1, 61)]
if mapp_name == 'JS':
subs = subs[1::2]
# Initialize scores (one score per subject and per emotion)
scores = np.zeros((len(subs), len(EMOTIONS)))
# Compute model performance per subject!
for i, sub in enumerate(subs):
data = pd.read_csv(f'data/ratings/sub-{sub}_ratings.tsv', sep='\t', index_col=0)
data = data.query("emotion != 'other'")
if mapp_name == 'JS':
data = data.query("data_split == 'test'")
X, y = data.iloc[:, :33], data.loc[:, 'emotion']
# Technically, we're not "fitting" anything, but this will set up the mapping matrix (self.Z_)
model.fit(X, y)
# Predict data + compute performance (AUROC)
y_pred = model.predict_proba(X)
y_ohe = ohe.transform(y.to_numpy()[:, np.newaxis])
idx = y_ohe.sum(axis=0) != 0
scores[i, idx] = roc_auc_score(y_ohe[:, idx], y_pred[:, idx], average=None)
# Store scores and raw predictions
scores = pd.DataFrame(scores, columns=EMOTIONS, index=subs).reset_index()
scores =
|
pd.melt(scores, id_vars='index', value_name='score', var_name='emotion')
|
pandas.melt
|
import pandas as pd
passageiros = pd.read_csv('Passageiros.csv')
passageiros.head()
import seaborn as sns
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (10, 6)
mpl.rcParams['font.size'] = 22
sns.lineplot(x='tempo',y='passageiros', data=passageiros,label='dado_completo')
## Scaling the data
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(passageiros)
dado_escalado = sc.transform(passageiros)
x=dado_escalado[:,0] #Features - Time
y=dado_escalado[:,1] #Target - Number of passengers
import matplotlib.pyplot as plt
sns.lineplot(x=x,y=y,label='dado_escalado')
plt.ylabel('Passageiros')
plt.xlabel('Data')
## Splitting into train and test
tamanho_treino = int(len(passageiros)*0.9) #Taking 90% of the data for training
tamanho_teste = len(passageiros)-tamanho_treino #The rest is reserved for testing
xtreino = x[0:tamanho_treino]
ytreino = y[0:tamanho_treino]
xteste = x[tamanho_treino:len(passageiros)]
yteste = y[tamanho_treino:len(passageiros)]
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xteste,y=yteste,label='teste')
# Lesson 2
## Linear Regression
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
regressor = Sequential()
regressor.add(Dense(1, input_dim=1, kernel_initializer='Ones',
activation='linear',use_bias=False))
regressor.compile(loss='mean_squared_error',optimizer='adam')
regressor.summary()
regressor.fit(xtreino,ytreino)
y_predict= regressor.predict(xtreino) #Predicting the training data (the fit)
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xtreino,y=y_predict[:,0],label='ajuste_treino')
d = {'tempo': xtreino, 'passageiros': y_predict[:,0]}
resultados = pd.DataFrame(data=d)
resultados
resultado_transf = sc.inverse_transform(resultados)
resultado_transf = pd.DataFrame(resultado_transf)
resultado_transf.columns = ['tempo','passageiros']
sns.lineplot(x="tempo",y="passageiros",data=passageiros)
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf)
y_predict_teste= regressor.predict(xteste) #Predicting the test data (the future)
d = {'tempo': xteste, 'passageiros': y_predict_teste[:,0]}
resultados_teste = pd.DataFrame(data=d)
resultado_transf_teste = sc.inverse_transform(resultados_teste)
resultado_transf_teste = pd.DataFrame(resultado_transf_teste)
resultado_transf_teste.columns = ['tempo','passageiros']
sns.lineplot(x="tempo",y="passageiros",data=passageiros,label='dado_completo')
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf,label='ajuste_treino')
sns.lineplot(x="tempo",y="passageiros",data=resultado_transf_teste,label='previsão')
## Non-linear Regression
regressor2 = Sequential()
regressor2.add(Dense(8, input_dim=1, kernel_initializer='random_uniform',
activation='sigmoid',use_bias=False))
regressor2.add(Dense(8, kernel_initializer='random_uniform',
activation='sigmoid',use_bias=False))
regressor2.add(Dense(1, kernel_initializer='random_uniform',
activation='linear',use_bias=False))
regressor2.compile(loss='mean_squared_error',optimizer='adam')
regressor2.summary()
regressor2.fit(xtreino,ytreino,epochs =500)
y_predict= regressor2.predict(xtreino) #Predicting the training data (the fit)
y_predict_teste= regressor2.predict(xteste) #Predicting the test data (the future)
sns.lineplot(x=xtreino,y=ytreino,label='treino')
sns.lineplot(x=xteste,y=yteste,label='teste')
sns.lineplot(x=xtreino,y=y_predict[:,0],label='ajuste_treino')
sns.lineplot(x=xteste,y=y_predict_teste[:,0],label='previsão')
# Lesson 3
## Changing how we pass the data
#Now x and y hold different values: X will contain the number of passengers at earlier time steps and y will contain the number of passengers at t+1, for example (a worked example follows the function below).
vetor = pd.DataFrame(ytreino)[0]
import numpy as np
def separa_dados(vetor,n_passos):
"""Entrada: vetor: número de passageiros
n_passos: número de passos no regressor
Saída:
X_novo: Array 2D
y_novo: Array 1D - Nosso alvo
"""
X_novo, y_novo = [], []
for i in range(n_passos,vetor.shape[0]):
X_novo.append(list(vetor.loc[i-n_passos:i-1]))
y_novo.append(vetor.loc[i])
X_novo, y_novo = np.array(X_novo), np.array(y_novo)
return X_novo, y_novo
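# Worked example (illustrative only): with n_passos=2 and vetor = pd.Series([1, 2, 3, 4]),
# separa_dados returns X_novo = [[1, 2], [2, 3]] and y_novo = [3, 4] --
# each row of X holds the previous n_passos values and y holds the value that comes right after them.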
xtreino_novo, ytreino_novo = separa_dados(vetor,1)
print(xtreino_novo[0:5]) #X
print(ytreino_novo[0:5]) #y
## Now let's split off the test set
vetor2 =
|
pd.DataFrame(yteste)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Pipeline-GUI for Analysis with MNE-Python
@author: <NAME>
@email: <EMAIL>
@github: https://github.com/marsipu/mne_pipeline_hd
License: BSD (3-clause)
Written on top of MNE-Python
Copyright © 2011-2020, authors of MNE-Python (https://doi.org/10.3389/fnins.2013.00267)
inspired by <NAME>. (2018) (https://doi.org/10.3389/fnins.2018.00006)
"""
import inspect
import os
import shutil
from ast import literal_eval
from functools import partial
from importlib import util
from os import mkdir
from os.path import isdir, isfile, join
from pathlib import Path
from types import FunctionType
import pandas as pd
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import (QButtonGroup, QComboBox, QDialog, QFileDialog, QFormLayout, QGroupBox,
QHBoxLayout, QLabel, QLineEdit, QListView, QListWidget, QListWidgetItem,
QMessageBox, QPushButton, QSizePolicy, QStyle, QTabWidget, QVBoxLayout, QGridLayout,
QProgressBar, QCheckBox)
from mne_pipeline_hd import QS
from mne_pipeline_hd.gui import parameter_widgets
from mne_pipeline_hd.gui.base_widgets import CheckDictList, CheckList, EditDict, EditList, SimpleDialog, SimpleList
from mne_pipeline_hd.gui.gui_utils import CodeEditor, ErrorDialog, center, get_exception_tuple, set_ratio_geometry, \
get_std_icon, MainConsoleWidget
from mne_pipeline_hd.gui.models import CustomFunctionModel, RunModel
from mne_pipeline_hd.pipeline_functions.function_utils import QRunController
class RunDialog(QDialog):
def __init__(self, main_win):
super().__init__(main_win)
self.mw = main_win
self.init_controller()
self.init_ui()
set_ratio_geometry(0.6, self)
self.show()
self.start()
def init_controller(self):
self.rc = QRunController(run_dialog=self, controller=self.mw.ct,
pool=self.mw.mp_pool)
def init_ui(self):
layout = QVBoxLayout()
view_layout = QGridLayout()
view_layout.addWidget(QLabel('Objects: '), 0, 0)
self.object_view = QListView()
self.object_model = RunModel(self.rc.all_objects, mode='object')
self.object_view.setModel(self.object_model)
self.object_view.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
view_layout.addWidget(self.object_view, 1, 0)
view_layout.addWidget(QLabel('Functions: '), 0, 1)
self.func_view = QListView()
self.func_model = RunModel(self.rc.current_all_funcs, mode='func')
self.func_view.setModel(self.func_model)
self.func_view.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
view_layout.addWidget(self.func_view, 1, 1)
view_layout.addWidget(QLabel('Errors: '), 0, 2)
self.error_widget = SimpleList(list())
self.error_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum)
# Connect Signal from error_widget to function to enable inspecting the errors
self.error_widget.currentChanged.connect(self.show_error)
view_layout.addWidget(self.error_widget, 1, 2)
layout.addLayout(view_layout)
self.console_widget = MainConsoleWidget()
layout.addWidget(self.console_widget)
self.pgbar = QProgressBar()
self.pgbar.setValue(0)
self.pgbar.setMaximum(len(self.rc.all_steps))
layout.addWidget(self.pgbar)
bt_layout = QHBoxLayout()
self.continue_bt = QPushButton('Continue')
self.continue_bt.setFont(QFont('AnyStyle', 14))
self.continue_bt.setIcon(get_std_icon('SP_MediaPlay'))
self.continue_bt.clicked.connect(self.start)
bt_layout.addWidget(self.continue_bt)
self.pause_bt = QPushButton('Pause')
self.pause_bt.setFont(QFont('AnyStyle', 14))
self.pause_bt.setIcon(get_std_icon('SP_MediaPause'))
self.pause_bt.clicked.connect(self.pause_funcs)
bt_layout.addWidget(self.pause_bt)
self.restart_bt = QPushButton('Restart')
self.restart_bt.setFont(QFont('AnyStyle', 14))
self.restart_bt.setIcon(get_std_icon('SP_BrowserReload'))
self.restart_bt.clicked.connect(self.restart)
bt_layout.addWidget(self.restart_bt)
if QS().value('use_qthread'):
self.reload_chbx = None
else:
self.reload_chbx = QCheckBox('Reload Modules')
bt_layout.addWidget(self.reload_chbx)
self.autoscroll_bt = QPushButton('Auto-Scroll')
self.autoscroll_bt.setCheckable(True)
self.autoscroll_bt.setChecked(True)
self.autoscroll_bt.setIcon(get_std_icon('SP_DialogOkButton'))
self.autoscroll_bt.clicked.connect(self.toggle_autoscroll)
bt_layout.addWidget(self.autoscroll_bt)
self.close_bt = QPushButton('Close')
self.close_bt.setFont(QFont('AnyStyle', 14))
self.close_bt.setIcon(get_std_icon('SP_MediaStop'))
self.close_bt.clicked.connect(self.close)
bt_layout.addWidget(self.close_bt)
layout.addLayout(bt_layout)
self.setLayout(layout)
def start(self):
# Set paused to false
self.rc.paused = False
# Enable/Disable Buttons
self.continue_bt.setEnabled(False)
self.pause_bt.setEnabled(True)
self.restart_bt.setEnabled(False)
self.close_bt.setEnabled(False)
self.rc.start()
def pause_funcs(self):
self.rc.paused = True
self.console_widget.write_html('<br><b>Finishing last function...</b><br>')
def restart(self):
# Reinitialize controller
self.init_controller()
if self.reload_chbx and self.reload_chbx.isChecked():
self.mw.init_mp_pool()
# Clear Console-Widget
self.console_widget.clear()
# Redo References to display-widgets
self.object_model._data = self.rc.all_objects
self.object_model.layoutChanged.emit()
self.func_model._data = self.rc.current_all_funcs
self.func_model.layoutChanged.emit()
self.error_widget.replace_data(list(self.rc.errors.keys()))
# Reset Progress-Bar
self.pgbar.setValue(0)
# Restart
self.start()
def toggle_autoscroll(self, state):
if state:
self.console_widget.set_autoscroll(True)
else:
self.console_widget.set_autoscroll(False)
def show_error(self, current, _):
self.console_widget.set_autoscroll(False)
self.autoscroll_bt.setChecked(False)
self.console_widget.scrollToAnchor(str(self.rc.errors[current][1]))
def closeEvent(self, event):
self.mw.pipeline_running = False
event.accept()
class EditGuiArgsDlg(QDialog):
def __init__(self, cf_dialog):
super().__init__(cf_dialog)
self.cf = cf_dialog
self.gui_args = dict()
self.default_gui_args = dict()
if self.cf.current_parameter:
covered_params = ['data', 'param_name', 'param_alias', 'default', 'param_unit', 'description']
# Get possible default GUI-Args additional to those covered by the Main-GUI
gui_type = self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_type']
if pd.notna(gui_type):
gui_handle = getattr(parameter_widgets, gui_type)
psig = inspect.signature(gui_handle).parameters
self.default_gui_args = {p: psig[p].default for p in psig if p not in covered_params}
# Get current GUI-Args
loaded_gui_args = self.cf.add_pd_params.loc[self.cf.current_parameter, 'gui_args']
if pd.notna(loaded_gui_args):
self.gui_args = literal_eval(loaded_gui_args)
else:
self.gui_args = dict()
# Fill in all possible Options, which are not already changed
for arg_key in [ak for ak in self.default_gui_args if ak not in self.gui_args]:
self.gui_args[arg_key] = self.default_gui_args[arg_key]
if len(self.gui_args) > 0:
self.init_ui()
self.open()
def init_ui(self):
layout = QVBoxLayout()
layout.addWidget(EditDict(data=self.gui_args, ui_buttons=False))
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt)
self.setLayout(layout)
def closeEvent(self, event):
# Remove all options which don't differ from the default
for arg_key in [ak for ak in self.gui_args if self.gui_args[ak] == self.default_gui_args[ak]]:
self.gui_args.pop(arg_key)
if len(self.gui_args) > 0:
self.cf.pguiargs_changed(self.gui_args)
event.accept()
class ChooseOptions(QDialog):
def __init__(self, cf_dialog, gui_type, options):
super().__init__(cf_dialog)
self.cf = cf_dialog
self.gui_type = gui_type
self.options = options
self.init_ui()
        # Using exec() instead of open() so that execution blocks until the dialog is closed
self.exec()
def init_ui(self):
layout = QVBoxLayout()
layout.addWidget(QLabel(f'For {self.gui_type}, you need to specify the options to choose from'))
layout.addWidget(EditList(data=self.options))
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt)
self.setLayout(layout)
# ToDo:
# Bug1: After saving a new function, the parameters stay in the table-view,
# Bug2: When editing existing functions, the proprietary parameters cannot be edited (they land in existing_params)
# Bug3: When hitting Enter, the focus still lies on the AddFunc/EditFunc-Buttons which can disrupt setup
class CustomFunctionImport(QDialog):
def __init__(self, main_win):
super().__init__(main_win)
self.mw = main_win
self.ct = main_win.ct
self.file_path = None
self.pkg_name = None
self.current_function = None
self.current_parameter = None
self.oblig_func = ['target', 'tab', 'group', 'matplotlib', 'mayavi']
self.oblig_params = ['default', 'gui_type']
self.exst_functions = list(self.ct.pd_funcs.index)
self.exst_parameters = ['mw', 'pr', 'meeg', 'fsmri', 'group']
self.exst_parameters += list(self.ct.settings.keys())
self.exst_parameters += list(QS().childKeys())
self.exst_parameters += list(self.ct.pr.parameters[self.ct.pr.p_preset].keys())
self.param_exst_dict = dict()
self.code_editor = None
self.code_dict = dict()
# Get available parameter-guis
self.available_param_guis = [pg for pg in dir(parameter_widgets) if 'Gui' in pg and pg != 'QtGui']
self.add_pd_funcs = pd.DataFrame(columns=['alias', 'target', 'tab', 'group', 'matplotlib',
'mayavi', 'dependencies', 'module', 'func_args', 'ready'])
self.add_pd_params = pd.DataFrame(columns=['alias', 'group', 'default', 'unit', 'description', 'gui_type',
'gui_args', 'functions', 'ready'])
self.yes_icon = get_std_icon('SP_DialogApplyButton')
self.no_icon = get_std_icon('SP_DialogCancelButton')
self.setWindowTitle('Custom-Functions-Setup')
self.init_ui()
self.open()
def init_ui(self):
layout = QVBoxLayout()
# Import Button and Combobox
add_bt_layout = QHBoxLayout()
addfn_bt = QPushButton('Load Function/s')
addfn_bt.setFont(QFont(QS().value('app_font'), 12))
addfn_bt.clicked.connect(self.get_functions)
add_bt_layout.addWidget(addfn_bt)
editfn_bt = QPushButton('Edit Function/s')
editfn_bt.setFont(QFont(QS().value('app_font'), 12))
editfn_bt.clicked.connect(self.edit_functions)
add_bt_layout.addWidget(editfn_bt)
layout.addLayout(add_bt_layout)
# Function-ComboBox
func_cmbx_layout = QHBoxLayout()
self.func_cmbx = QComboBox()
self.func_cmbx.currentTextChanged.connect(self.func_item_selected)
func_cmbx_layout.addWidget(self.func_cmbx)
self.func_chkl = QLabel()
self.func_chkl.setPixmap(self.no_icon.pixmap(16, 16))
self.func_chkl.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
func_cmbx_layout.addWidget(self.func_chkl)
layout.addLayout(func_cmbx_layout)
# Hint for obligatory items
# There may be a better way to center the labels instead of with the space-labels
obl_hint_layout = QHBoxLayout()
space_label1 = QLabel('')
obl_hint_layout.addWidget(space_label1)
obl_hint_label1 = QLabel()
obl_hint_label1.setPixmap(self.no_icon.pixmap(16, 16))
obl_hint_label1.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label1)
obl_hint_label2 = QLabel()
obl_hint_label2.setPixmap(get_std_icon('SP_ArrowForward').pixmap(16, 16))
obl_hint_label2.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label2)
obl_hint_label3 = QLabel()
obl_hint_label3.setPixmap(self.yes_icon.pixmap(16, 16))
obl_hint_label3.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label3)
obl_hint_label4 = QLabel('(= The items marked are obligatory)')
obl_hint_label4.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
obl_hint_layout.addWidget(obl_hint_label4)
space_label2 = QLabel('')
obl_hint_layout.addWidget(space_label2)
layout.addLayout(obl_hint_layout)
setup_layout = QHBoxLayout()
# The Function-Setup-Groupbox
func_setup_gbox = QGroupBox('Function-Setup')
func_setup_gbox.setAlignment(Qt.AlignHCenter)
func_setup_formlayout = QFormLayout()
self.falias_le = QLineEdit()
self.falias_le.setToolTip('Set a name if you want something other than the functions-name')
self.falias_le.textEdited.connect(self.falias_changed)
func_setup_formlayout.addRow('Alias', self.falias_le)
target_layout = QHBoxLayout()
self.target_cmbx = QComboBox()
self.target_cmbx.setToolTip('Set the target on which the function shall operate')
self.target_cmbx.setEditable(False)
self.target_cmbx.activated.connect(self.target_cmbx_changed)
target_layout.addWidget(self.target_cmbx)
self.target_chkl = QLabel()
target_layout.addWidget(self.target_chkl)
func_setup_formlayout.addRow('Target', target_layout)
tab_layout = QHBoxLayout()
self.tab_cmbx = QComboBox()
self.tab_cmbx.setToolTip('Choose the Tab for the function (Compute/Plot/...)')
self.tab_cmbx.setEditable(True)
self.tab_cmbx.activated.connect(self.tab_cmbx_changed)
self.tab_cmbx.editTextChanged.connect(self.tab_cmbx_edited)
tab_layout.addWidget(self.tab_cmbx)
self.tab_chkl = QLabel()
tab_layout.addWidget(self.tab_chkl)
func_setup_formlayout.addRow('Tab', tab_layout)
group_layout = QHBoxLayout()
self.group_cmbx = QComboBox()
self.group_cmbx.setToolTip('Choose the function-group for the function or create a new one')
self.group_cmbx.setEditable(True)
self.group_cmbx.activated.connect(self.group_cmbx_changed)
self.group_cmbx.editTextChanged.connect(self.group_cmbx_edited)
group_layout.addWidget(self.group_cmbx)
self.group_chkl = QLabel()
group_layout.addWidget(self.group_chkl)
func_setup_formlayout.addRow('Group', group_layout)
mtpl_layout = QHBoxLayout()
self.mtpl_bts = QButtonGroup(self)
self.mtpl_yesbt = QPushButton('Yes')
self.mtpl_yesbt.setCheckable(True)
self.mtpl_nobt = QPushButton('No')
self.mtpl_nobt.setCheckable(True)
self.mtpl_void = QPushButton('')
self.mtpl_void.setCheckable(True)
self.mtpl_bts.addButton(self.mtpl_yesbt)
self.mtpl_bts.addButton(self.mtpl_nobt)
self.mtpl_bts.addButton(self.mtpl_void)
mtpl_layout.addWidget(self.mtpl_yesbt)
mtpl_layout.addWidget(self.mtpl_nobt)
self.mtpl_yesbt.setToolTip('Choose, if the function contains an interactive Matplotlib-Plot')
self.mtpl_nobt.setToolTip('Choose, if the function contains no interactive Matplotlib-Plot')
self.mtpl_bts.buttonToggled.connect(self.mtpl_changed)
self.mtpl_chkl = QLabel()
mtpl_layout.addWidget(self.mtpl_chkl)
func_setup_formlayout.addRow('Matplotlib?', mtpl_layout)
myv_layout = QHBoxLayout()
self.myv_bts = QButtonGroup(self)
self.myv_yesbt = QPushButton('Yes')
self.myv_yesbt.setCheckable(True)
self.myv_nobt = QPushButton('No')
self.myv_nobt.setCheckable(True)
self.myv_void = QPushButton('')
self.myv_void.setCheckable(True)
self.myv_bts.addButton(self.myv_yesbt)
self.myv_bts.addButton(self.myv_nobt)
self.myv_bts.addButton(self.myv_void)
myv_layout.addWidget(self.myv_yesbt)
myv_layout.addWidget(self.myv_nobt)
self.myv_yesbt.setToolTip('Choose, if the function contains a Pyvista/Mayavi-Plot')
        self.myv_nobt.setToolTip('Choose, if the function contains no Pyvista/Mayavi-Plot')
self.myv_bts.buttonToggled.connect(self.myv_changed)
self.myv_chkl = QLabel()
myv_layout.addWidget(self.myv_chkl)
func_setup_formlayout.addRow('Pyvista/Mayavi?', myv_layout)
self.dpd_bt = QPushButton('Set Dependencies')
self.dpd_bt.setToolTip('Set the functions that must be activated before or the files that must be present '
'for this function to work')
self.dpd_bt.clicked.connect(partial(SelectDependencies, self))
func_setup_formlayout.addRow('Dependencies', self.dpd_bt)
func_setup_gbox.setLayout(func_setup_formlayout)
setup_layout.addWidget(func_setup_gbox)
# The Parameter-Setup-Group-Box
self.param_setup_gbox = QGroupBox('Parameter-Setup')
self.param_setup_gbox.setAlignment(Qt.AlignHCenter)
param_setup_layout = QVBoxLayout()
self.exstparam_l = QLabel()
self.exstparam_l.setWordWrap(True)
self.exstparam_l.hide()
param_setup_layout.addWidget(self.exstparam_l)
self.param_view = QListView()
self.param_model = CustomFunctionModel(self.add_pd_params)
self.param_view.setModel(self.param_model)
self.param_view.selectionModel().currentChanged.connect(self.param_item_selected)
param_setup_layout.addWidget(self.param_view)
param_setup_formlayout = QFormLayout()
self.palias_le = QLineEdit()
self.palias_le.setToolTip('Set a name if you want something other than the parameters-name')
self.palias_le.textEdited.connect(self.palias_changed)
param_setup_formlayout.addRow('Alias', self.palias_le)
default_layout = QHBoxLayout()
self.default_le = QLineEdit()
self.default_le.setToolTip('Set the default for the parameter (it has to fit the gui-type!)')
self.default_le.textEdited.connect(self.pdefault_changed)
default_layout.addWidget(self.default_le)
self.default_chkl = QLabel()
default_layout.addWidget(self.default_chkl)
param_setup_formlayout.addRow('Default', default_layout)
self.unit_le = QLineEdit()
self.unit_le.setToolTip('Set the unit for the parameter (optional)')
self.unit_le.textEdited.connect(self.punit_changed)
param_setup_formlayout.addRow('Unit', self.unit_le)
self.description_le = QLineEdit()
self.description_le.setToolTip('Short description of the parameter (optional)')
self.description_le.textEdited.connect(self.pdescription_changed)
param_setup_formlayout.addRow('Description', self.description_le)
guitype_layout = QHBoxLayout()
self.guitype_cmbx = QComboBox()
self.guitype_cmbx.setToolTip('Choose the GUI from the available GUIs')
self.guitype_cmbx.activated.connect(self.guitype_cmbx_changed)
guitype_layout.addWidget(self.guitype_cmbx)
test_bt = QPushButton('Test')
test_bt.clicked.connect(self.show_param_gui)
guitype_layout.addWidget(test_bt)
self.guitype_chkl = QLabel()
guitype_layout.addWidget(self.guitype_chkl)
param_setup_formlayout.addRow('GUI-Type', guitype_layout)
self.guiargs_bt = QPushButton('Edit')
self.guiargs_bt.clicked.connect(partial(EditGuiArgsDlg, self))
self.guiargs_bt.setToolTip('Set Arguments for the GUI in a dict (optional)')
param_setup_formlayout.addRow('Additional Options', self.guiargs_bt)
param_setup_layout.addLayout(param_setup_formlayout)
self.param_setup_gbox.setLayout(param_setup_layout)
setup_layout.addWidget(self.param_setup_gbox)
layout.addLayout(setup_layout)
bt_layout = QHBoxLayout()
save_bt = QPushButton('Save')
save_bt.setFont(QFont(QS().value('app_font'), 16))
save_bt.clicked.connect(self.save_pkg)
bt_layout.addWidget(save_bt)
src_bt = QPushButton('Show Code')
src_bt.setFont(QFont(QS().value('app_font'), 16))
src_bt.clicked.connect(self.show_code)
bt_layout.addWidget(src_bt)
close_bt = QPushButton('Quit')
close_bt.setFont(QFont(QS().value('app_font'), 16))
close_bt.clicked.connect(self.close)
bt_layout.addWidget(close_bt)
layout.addLayout(bt_layout)
self.setLayout(layout)
self.populate_target_cmbx()
self.populate_tab_cmbx()
self.populate_group_cmbx()
self.populate_guitype_cmbx()
def update_func_cmbx(self):
self.func_cmbx.clear()
self.func_cmbx.insertItems(0, self.add_pd_funcs.index)
try:
current_index = list(self.add_pd_funcs.index).index(self.current_function)
except ValueError:
current_index = 0
self.func_cmbx.setCurrentIndex(current_index)
def clear_func_items(self):
self.falias_le.clear()
self.target_cmbx.setCurrentIndex(-1)
self.target_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.tab_cmbx.setCurrentIndex(-1)
self.tab_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.group_cmbx.setCurrentIndex(-1)
self.group_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.mtpl_yesbt.setChecked(False)
self.mtpl_nobt.setChecked(False)
self.mtpl_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
        self.myv_yesbt.setChecked(False)
        self.myv_nobt.setChecked(False)
self.myv_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
def clear_param_items(self):
self.update_param_view()
self.palias_le.clear()
self.default_le.clear()
self.default_chkl.setPixmap(self.no_icon.pixmap(QSize(16, 16)))
self.unit_le.clear()
self.guitype_cmbx.setCurrentIndex(-1)
self.guitype_chkl.setPixmap(self.yes_icon.pixmap(QSize(16, 16)))
self.param_setup_gbox.setEnabled(False)
def func_item_selected(self, text):
if text:
self.current_function = text
self.update_code_editor()
self.update_func_setup()
if any([self.current_function in str(x) for x in self.add_pd_params['functions']]):
self.param_setup_gbox.setEnabled(True)
self.update_param_view()
self.current_parameter = \
self.add_pd_params.loc[
[self.current_function in str(x) for x in self.add_pd_params['functions']]].index[0]
self.update_exst_param_label()
self.update_param_setup()
else:
self.update_exst_param_label()
# Clear existing entries
self.clear_param_items()
def param_item_selected(self, current):
self.current_parameter = self.param_model.getData(current)
self.update_param_setup()
self.update_code_editor()
def update_func_setup(self):
if
|
pd.notna(self.add_pd_funcs.loc[self.current_function, 'alias'])
|
pandas.notna
|
"""
Runs a model on a single node on 4 GPUs.
"""
import os
import csv
import pytorch_lightning as pl
import numpy as np
import pandas as pd
import torch
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.loggers import TestTubeLogger
from datetime import datetime
from argparse import ArgumentParser
from model import DSANet
from datautil import DataUtil
import _pickle as pickle
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import csv
SEED = 7
torch.manual_seed(SEED)
np.random.seed(SEED)
val_results = pd.DataFrame()
test_results = pd.DataFrame()
def optimize(optimizer_params):
"""
Main training routine specific for this project
"""
global val_results, test_results
global val_out_file, test_out_file, ITERATION, epochs
ITERATION += 1
root_dir = os.path.dirname(os.path.realpath(__file__))
    # although we use hyperOptParser, we are using it only as argparse right now
    parent_parser = ArgumentParser(add_help=False)
# allow model to overwrite or extend args
parser = DSANet.add_model_specific_args(parent_parser, root_dir)
hyperparams = parser.parse_args()
dataset = DataUtil(hyperparams, 2)
if hasattr(dataset, 'scale'):
#print('we have scale')
setattr(hyperparams, 'scale', dataset.scale)
#print(dataset.scale)
if hasattr(dataset, 'scaler'):
#print('we have scaler')
setattr(hyperparams, 'scaler', dataset.scaler)
        #print(dataset.scaler)
setattr(hyperparams, 'n_multiv', dataset.m)
setattr(hyperparams, 'batch_size', int(optimizer_params['batch_size']))
setattr(hyperparams, 'drop_prob', optimizer_params['dropout'])
setattr(hyperparams, 'learning_rate', optimizer_params['learning_rate'])
setattr(hyperparams, 'd_model', int(optimizer_params['units']))
setattr(hyperparams, 'local', int(optimizer_params['local']))
setattr(hyperparams, 'n_kernels', int(optimizer_params['n_kernels']))
setattr(hyperparams, 'window', int(optimizer_params['window']))
hparams = hyperparams
print(f"\n#######\nTESTING hparams: mv:{hparams.n_multiv}, bs:{hparams.batch_size}, drop:{hparams.drop_prob}, lr:{hparams.learning_rate}, d_model:{hparams.d_model}, local:{hparams.local}, n_kernels:{hparams.n_kernels}, window:{hparams.window}\n#######")
# ------------------------
# 1 INIT LIGHTNING MODEL
# ------------------------
print('loading model...')
model = DSANet(hparams)
print('model built')
# ------------------------
# 2 INIT TEST TUBE EXP
# ------------------------
filename = '{}{}{}{}{}{}'.format('my_dsanet_', hparams.data_name ,'_', hparams.powerset, '_', str(hparams.calendar))
logger = TestTubeLogger("tb_logs_v2", filename)
# ------------------------
# 3 DEFINE CALLBACKS
# ------------------------
early_stop_callback = EarlyStopping(
monitor='val_loss',
patience=5,
verbose=False,
mode='min'
)
# ------------------------
# 4 INIT TRAINER
# ------------------------
trainer = pl.Trainer(
gpus=4,
distributed_backend='dp',
logger=logger,
early_stop_callback=early_stop_callback,
show_progress_bar=False,
profiler=True,
fast_dev_run=False,
max_epochs=100
)
# ------------------------
# 5 START TRAINING
# ------------------------
st_time = datetime.now()
result = trainer.fit(model)
eval_result = model.val_results
df1=pd.DataFrame(eval_result, [ITERATION])
print(result)
eval_time = str(datetime.now() - st_time)
print(f"Train time: {eval_time}, Results: {eval_result}")
st_time = datetime.now()
model.hparams.mcdropout = 'True'
trainer.test(model)
eval_time = str(datetime.now() - st_time)
test_result = model.test_results
df2 = pd.DataFrame(test_result, [ITERATION])
print(f"Test time: {eval_time}, Results: {test_result}")
df1 = pd.concat([df1, pd.DataFrame(vars(hparams), [ITERATION])], axis=1, sort=False)
df2 = pd.concat([df2, pd.DataFrame(vars(hparams), [ITERATION])], axis=1, sort=False)
val_results =
|
pd.concat([val_results, df1], axis=0, sort=False)
|
pandas.concat
|
import pandas as pd
import numpy as np
from datetime import date
"""
dataset split:
(date_received)
dataset3: 20160701~20160731 (113640),features3 from 20160315~20160630 (off_test)
dataset2: 20160515~20160615 (258446),features2 from 20160201~20160514
dataset1: 20160414~20160514 (138303),features1 from 20160101~20160413
1.merchant related:
sales_use_coupon. total_coupon
transfer_rate = sales_use_coupon/total_coupon.
merchant_avg_distance,merchant_min_distance,merchant_max_distance of those use coupon
total_sales. coupon_rate = sales_use_coupon/total_sales.
2.coupon related:
discount_rate. discount_man. discount_jian. is_man_jian
day_of_week,day_of_month. (date_received)
3.user related:
distance.
user_avg_distance, user_min_distance,user_max_distance.
buy_use_coupon. buy_total. coupon_received.
buy_use_coupon/coupon_received.
avg_diff_date_datereceived. min_diff_date_datereceived. max_diff_date_datereceived.
count_merchant.
4.user_merchant:
times_user_buy_merchant_before.
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#1754884 record,1053282 with coupon_id,9738 coupon. date_received:20160101~20160615,date:20160101~20160630, 539438 users, 8415 merchants
off_train = pd.read_csv('data/ccf_offline_stage1_train.csv',header=None)
off_train.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received','date']
#2050 coupon_id. date_received:20160701~20160731, 76309 users(76307 in trainset, 35965 in online_trainset), 1559 merchants(1558 in trainset)
off_test = pd.read_csv('data/ccf_offline_stage1_test_revised.csv',header=None)
off_test.columns = ['user_id','merchant_id','coupon_id','discount_rate','distance','date_received']
#11429826 record(872357 with coupon_id),762858 user(267448 in off_train)
on_train = pd.read_csv('data/ccf_online_stage1_train.csv',header=None)
on_train.columns = ['user_id','merchant_id','action','coupon_id','discount_rate','date_received','date']
dataset3 = off_test
feature3 = off_train[((off_train.date>='20160315')&(off_train.date<='20160630'))|((off_train.date=='null')&(off_train.date_received>='20160315')&(off_train.date_received<='20160630'))]
dataset2 = off_train[(off_train.date_received>='20160515')&(off_train.date_received<='20160615')]
feature2 = off_train[(off_train.date>='20160201')&(off_train.date<='20160514')|((off_train.date=='null')&(off_train.date_received>='20160201')&(off_train.date_received<='20160514'))]
dataset1 = off_train[(off_train.date_received>='20160414')&(off_train.date_received<='20160514')]
feature1 = off_train[(off_train.date>='20160101')&(off_train.date<='20160413')|((off_train.date=='null')&(off_train.date_received>='20160101')&(off_train.date_received<='20160413'))]
############# other feature ##################
"""
5. other feature:
this_month_user_receive_all_coupon_count
this_month_user_receive_same_coupon_count
this_month_user_receive_same_coupon_lastone
this_month_user_receive_same_coupon_firstone
this_day_user_receive_all_coupon_count
this_day_user_receive_same_coupon_count
day_gap_before, day_gap_after (receive the same coupon)
"""
#for dataset3
t = dataset3[['user_id']]
t['this_month_user_receive_all_coupon_count'] = 1
t = t.groupby('user_id').agg('sum').reset_index()
t1 = dataset3[['user_id','coupon_id']]
t1['this_month_user_receive_same_coupon_count'] = 1
t1 = t1.groupby(['user_id','coupon_id']).agg('sum').reset_index()
t2 = dataset3[['user_id','coupon_id','date_received']]
t2.date_received = t2.date_received.astype('str')
t2 = t2.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t2['receive_number'] = t2.date_received.apply(lambda s:len(s.split(':')))
t2 = t2[t2.receive_number>1]
t2['max_date_received'] = t2.date_received.apply(lambda s:max([int(d) for d in s.split(':')]))
t2['min_date_received'] = t2.date_received.apply(lambda s:min([int(d) for d in s.split(':')]))
t2 = t2[['user_id','coupon_id','max_date_received','min_date_received']]
t3 = dataset3[['user_id','coupon_id','date_received']]
t3 = pd.merge(t3,t2,on=['user_id','coupon_id'],how='left')
t3['this_month_user_receive_same_coupon_lastone'] = t3.max_date_received - t3.date_received
t3['this_month_user_receive_same_coupon_firstone'] = t3.date_received - t3.min_date_received
def is_firstlastone(x):
if x==0:
return 1
elif x>0:
return 0
else:
return -1 #those only receive once
t3.this_month_user_receive_same_coupon_lastone = t3.this_month_user_receive_same_coupon_lastone.apply(is_firstlastone)
t3.this_month_user_receive_same_coupon_firstone = t3.this_month_user_receive_same_coupon_firstone.apply(is_firstlastone)
t3 = t3[['user_id','coupon_id','date_received','this_month_user_receive_same_coupon_lastone','this_month_user_receive_same_coupon_firstone']]
t4 = dataset3[['user_id','date_received']]
t4['this_day_user_receive_all_coupon_count'] = 1
t4 = t4.groupby(['user_id','date_received']).agg('sum').reset_index()
t5 = dataset3[['user_id','coupon_id','date_received']]
t5['this_day_user_receive_same_coupon_count'] = 1
t5 = t5.groupby(['user_id','coupon_id','date_received']).agg('sum').reset_index()
t6 = dataset3[['user_id','coupon_id','date_received']]
t6.date_received = t6.date_received.astype('str')
t6 = t6.groupby(['user_id','coupon_id'])['date_received'].agg(lambda x:':'.join(x)).reset_index()
t6.rename(columns={'date_received':'dates'},inplace=True)
def get_day_gap_before(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))-date(int(d[0:4]),int(d[4:6]),int(d[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
def get_day_gap_after(s):
date_received,dates = s.split('-')
dates = dates.split(':')
gaps = []
for d in dates:
this_gap = (date(int(d[0:4]),int(d[4:6]),int(d[6:8]))-date(int(date_received[0:4]),int(date_received[4:6]),int(date_received[6:8]))).days
if this_gap>0:
gaps.append(this_gap)
if len(gaps)==0:
return -1
else:
return min(gaps)
t7 = dataset3[['user_id','coupon_id','date_received']]
t7 = pd.merge(t7,t6,on=['user_id','coupon_id'],how='left')
t7['date_received_date'] = t7.date_received.astype('str') + '-' + t7.dates
t7['day_gap_before'] = t7.date_received_date.apply(get_day_gap_before)
t7['day_gap_after'] = t7.date_received_date.apply(get_day_gap_after)
t7 = t7[['user_id','coupon_id','date_received','day_gap_before','day_gap_after']]
other_feature3 = pd.merge(t1,t,on='user_id')
other_feature3 = pd.merge(other_feature3,t3,on=['user_id','coupon_id'])
other_feature3 = pd.merge(other_feature3,t4,on=['user_id','date_received'])
other_feature3 = pd.merge(other_feature3,t5,on=['user_id','coupon_id','date_received'])
other_feature3 =
|
pd.merge(other_feature3,t7,on=['user_id','coupon_id','date_received'])
|
pandas.merge
|
import numpy as np
import pandas as pd
def read_app(file_path):
reader = pd.read_table(file_path, header=None, chunksize=10000)
data = pd.concat(reader, axis=0, ignore_index=True)
data.columns = ['id', 'apps']
return data
def get_apps_dummy(data):
"""
    Turn the apps each user has installed (from dat_app) into 0-1 indicators
    1. read the required 104 apps: app_104_list
    2. then build a 0-1 vector of length 'len(app_104_list)'
"""
def is_in_all_apps(x):
xs = x.split(',')
xs = set(xs)
app_vec = list(map(lambda app: int(app in xs), app_66))
return app_vec
apps_dummy_0 = list(map(is_in_all_apps, data['apps']))
apps_dummy_1 = pd.DataFrame(apps_dummy_0, columns=app_66)
apps_dummy_2 = pd.concat([data[['id']], apps_dummy_1], axis=1)
return apps_dummy_2
if __name__ == '__main__':
input_path = './'
sample_train =
|
pd.read_table('./open_data/sample_train.txt')
|
pandas.read_table
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 15:42:36 2021
@author: nicolasnavarre
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
figures = 'figures/'
def production_by_group(POM_data):
POM_global = pd.DataFrame()
POM_global ["Org"] = POM_data.groupby(["Area"]).apply(lambda x: x["Org per Capita (with waste & feed)"].sum())
POM_global ["EAT"] = POM_data.groupby(["Area"]).apply(lambda x: x["EAT per Capita (with waste & feed)"].sum())
import numpy as np
for j in POM_data['GROUP'].unique().tolist():
food_list_eat = []
food_list_org = []
group_list = []
diet_df = pd.DataFrame()
width = 0.35
fig, ax = plt.subplots()
temp_count = 0
temp_counteat = 0
food_data = POM_data.groupby(["EAT_group"]).apply(lambda x: (x["POM"].sum()/(x["Population (2016), 1000person"].sum()*1000)))
if j == 'Other':
continue
for i in food_data.index:
df_temp = POM_data.loc[(POM_data['EAT_group'] == i) & (POM_data['GROUP'] == j)]
df_pop = df_temp[['Area', 'Population (2016), 1000person']]
df_pop = df_pop.drop_duplicates()
Org_avg = df_temp.groupby(["Area"]).apply(lambda x: (x["POM Org (with waste & feed)"].sum()))
EAT_avg = df_temp.groupby(["Area"]).apply(lambda x: (x["POM EAT (with waste & feed)"].sum()))
Org_avg = pd.DataFrame(Org_avg)
Org_avg = Org_avg.reset_index()
Org_avg = pd.merge(Org_avg, df_pop, on = 'Area', how = 'left')
Org_avg = Org_avg.rename(columns = {0 : 'food'})
Org_food = ((Org_avg['food'] * 1000 * 1000 * 1000) / 365 ).sum() / (Org_avg["Population (2016), 1000person"] * 1000).sum()
EAT_avg = pd.DataFrame(EAT_avg)
EAT_avg = EAT_avg.reset_index()
EAT_avg = pd.merge(EAT_avg, df_pop, on = 'Area', how = 'left')
EAT_avg = EAT_avg.rename(columns = {0 : 'food'})
EAT_food = ((EAT_avg['food'] * 1000 * 1000 * 1000) / 365 ).sum() / (EAT_avg["Population (2016), 1000person"] * 1000).sum()
temp_count += Org_food
temp_counteat += EAT_food
food_list_eat.append(EAT_food)
food_list_org.append(Org_food)
group_list.append(i)
x = np.arange(len(group_list))
diet_df['group'] = group_list
diet_df['gF EAT'] = food_list_eat
diet_df['gF Org'] = food_list_org
diet_df['dif'] = diet_df['gF Org'] - diet_df['gF EAT']
diet_df = diet_df.sort_values(by=['dif'], ascending=False)
ax.bar(x + width/2, diet_df['gF EAT'], width, label='EAT Diet', color = 'g')
ax.bar(x - width/2, diet_df['gF Org'], width, label='BAU Diet', color = 'r')
ax.set_ylabel('Prod/capita (g/person-day)')
ax.set_xticks(x)
ax.set_xticklabels(diet_df['group'])
pos_values = len(diet_df[diet_df["dif"]>0])
ax.axvspan(-0.5, pos_values-0.5, facecolor='0.2', alpha=0.25, zorder=-100)
plt.xticks(rotation = 90)
legend_elements = [Line2D([0], [0], lw = 0, marker='s', color='r', label='Current Diet\nTotal = '+str(int(temp_count))+' g/d',\
markerfacecolor='r'),
Line2D([0], [0], lw = 0, marker='s', color='g', label='EAT Lancet Diet\nTotal = '+str(int(temp_counteat))+' g/d',\
markerfacecolor='g')]
lg = ax.legend(handles=legend_elements)
fig.savefig(figures+j+" EAT_Group Production.png", bbox_extra_artists=(lg,), bbox_inches='tight', dpi = 400)
plt.close()
plt.close()
temp_count = 0
temp_counteat = 0
food_data = POM_data.groupby(["EAT_group"]).apply(lambda x: (x["POM"].sum()/(x["Population (2016), 1000person"].sum()*1000)))
x_labels = []
eat_bar = []
org_bar = []
group_list = []
food_list_eat = []
food_list_org = []
cal_list_eat = []
cal_list_org = []
diet_df = pd.DataFrame()
    extra_nations = ['Puerto Rico', 'Palestine', 'Greenland', 'Falkland Islands (Malvinas)',\
                     'New Caledonia', 'China', 'China, Taiwan Province of' ]
POM_data['OrgCal perD']= ((POM_data['POM (no waste)']*10**9)/365 * POM_data['calories per g'])/(POM_data["Population (2016), 1000person"]*1000)
for i in food_data.index:
df_temp = POM_data.loc[(POM_data['EAT_group'] == i)]
df_pop = df_temp[['Area', 'Population (2016), 1000person']]
df_pop = df_pop.drop_duplicates()
Org_avg = df_temp.groupby(["Area"]).apply(lambda x: (x["POM Org (with waste & feed)"].sum()))
EAT_avg = df_temp.groupby(["Area"]).apply(lambda x: (x["POM EAT (with waste & feed)"].sum()))
Org_cal = df_temp.groupby(["Area"]).apply(lambda x: (x["OrgCal perD"].sum()))
EAT_cal = df_temp.groupby(["Area"]).apply(lambda x: (x["Cal Needed"].sum()))
Org_avg =
|
pd.DataFrame(Org_avg)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on 2017-8-24
@author: cheng.li
"""
import numpy as np
import pandas as pd
import copy
from sklearn.linear_model import *
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.svm import NuSVR
from alphamind.api import *
from PyFin.api import *
from matplotlib import pyplot as plt
plt.style.use('ggplot')
'''
Settings:
universe - zz500
neutralize - all industries
benchmark - zz500
base factors - all the risk styles
quantiles - 5
start_date - 2012-01-01
end_date - 2017-08-01
re-balance - 2 week
training - every 8 week
'''
engine = SqlEngine('postgresql+psycopg2://postgres:[email protected]/alpha')
universe = Universe('zz500', ['zz500'])
neutralize_risk = industry_styles
portfolio_risk_neutralize = []
portfolio_industry_neutralize = True
alpha_factors = {
'eps': LAST('eps_q'),
'roe': LAST('roe_q'),
'bdto': LAST('BDTO'),
'cfinc1': LAST('CFinc1'),
'chv': LAST('CHV'),
'rvol': LAST('RVOL'),
'val': LAST('VAL'),
'grev': LAST('GREV'),
'droeafternonorecurring': LAST('DROEAfterNonRecurring')}
benchmark = 905
n_bins = 5
frequency = '2w'
batch = 8
start_date = '2012-01-01'
end_date = '2017-11-05'
method = 'risk_neutral'
use_rank = 100
'''
fetch data from target data base and do the corresponding data processing
'''
data_package = fetch_data_package(engine,
alpha_factors=alpha_factors,
start_date=start_date,
end_date=end_date,
frequency=frequency,
universe=universe,
benchmark=benchmark,
batch=batch,
neutralized_risk=neutralize_risk,
pre_process=[winsorize_normal, standardize],
post_process=[winsorize_normal, standardize],
warm_start=batch)
'''
training phase: using Linear - regression from scikit-learn
'''
train_x = data_package['train']['x']
train_y = data_package['train']['y']
dates = sorted(train_x.keys())
model_df = pd.Series()
features = data_package['x_names']
for train_date in dates:
model = LinearRegression(features, fit_intercept=False)
x = train_x[train_date]
y = train_y[train_date]
model.fit(x, y)
model_df.loc[train_date] = model
alpha_logger.info('trade_date: {0} training finished'.format(train_date))
'''
predicting phase: using trained model on the re-balance dates (optimizing with risk neutral)
'''
predict_x = data_package['predict']['x']
settlement = data_package['settlement']
industry_dummies =
|
pd.get_dummies(settlement['industry'].values)
|
pandas.get_dummies
|
# %% [markdown]
# This notebook is a -modified- VSCode notebook version of:
# https://www.kaggle.com/sheriytm/brewed-tpot-for-nyc-with-love-lb0-37
#
# You could find the train data from:
# https://www.kaggle.com/c/nyc-taxi-trip-duration/data
# You could find the fastest routes data from:
# https://www.kaggle.com/oscarleo/new-york-city-taxi-with-osrm
## All the data files should be in the same directory with this file!
#%%
# Importing necessary libraries
import os
import numpy as np
import pandas as pd
from haversine import haversine
import datetime as dt
#%%
# Loading training data
train = pd.read_csv('train.csv')
#%%
# Long and painful feature generation part
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
t0 = dt.datetime.now()
train['pickup_datetime'] = pd.to_datetime(train.pickup_datetime)
train.loc[:, 'pickup_date'] = train['pickup_datetime'].dt.date
train['dropoff_datetime'] =
|
pd.to_datetime(train.dropoff_datetime)
|
pandas.to_datetime
|
"""
Tests that the file header is properly handled or inferred
during parsing for all of the parsers defined in parsers.py
"""
from collections import namedtuple
from io import StringIO
import numpy as np
import pytest
from pandas.errors import ParserError
from pandas import (
DataFrame,
Index,
MultiIndex,
)
import pandas._testing as tm
# TODO(1.4): Change me to xfails at release time
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
@skip_pyarrow
def test_read_with_bad_header(all_parsers):
parser = all_parsers
msg = r"but only \d+ lines in file"
with pytest.raises(ValueError, match=msg):
s = StringIO(",,")
parser.read_csv(s, header=[10])
def test_negative_header(all_parsers):
# see gh-27779
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
with pytest.raises(
ValueError,
match="Passing negative integer to header is invalid. "
"For no header, use header=None instead",
):
parser.read_csv(StringIO(data), header=-1)
@pytest.mark.parametrize("header", [([-1, 2, 4]), ([-5, 0])])
def test_negative_multi_index_header(all_parsers, header):
# see gh-27779
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
with pytest.raises(
ValueError, match="cannot specify multi-index header with negative integers"
):
parser.read_csv(StringIO(data), header=header)
@pytest.mark.parametrize("header", [True, False])
def test_bool_header_arg(all_parsers, header):
# see gh-6114
parser = all_parsers
data = """\
MyColumn
a
b
a
b"""
msg = "Passing a bool to header is invalid"
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), header=header)
def test_no_header_prefix(all_parsers):
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
result = parser.read_csv(StringIO(data), prefix="Field", header=None)
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
columns=["Field0", "Field1", "Field2", "Field3", "Field4"],
)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_header_with_index_col(all_parsers):
parser = all_parsers
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ["A", "B", "C"]
result = parser.read_csv(StringIO(data), names=names)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result, expected)
def test_header_not_first_line(all_parsers):
parser = all_parsers
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
result = parser.read_csv(StringIO(data), header=2, index_col=0)
expected = parser.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_header_multi_index(all_parsers):
parser = all_parsers
expected = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
result = parser.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[0, 1])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,msg",
[
(
{"index_col": ["foo", "bar"]},
(
"index_col must only contain "
"row numbers when specifying "
"a multi-index header"
),
),
(
{"index_col": [0, 1], "names": ["foo", "bar"]},
("cannot specify names when specifying a multi-index header"),
),
(
{"index_col": [0, 1], "usecols": ["foo", "bar"]},
("cannot specify usecols when specifying a multi-index header"),
),
],
)
def test_header_multi_index_invalid(all_parsers, kwargs, msg):
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=[0, 1, 2, 3], **kwargs)
_TestTuple = namedtuple("_TestTuple", ["first", "second"])
@skip_pyarrow
@pytest.mark.parametrize(
"kwargs",
[
{"header": [0, 1]},
{
"skiprows": 3,
"names": [
("a", "q"),
("a", "r"),
("a", "s"),
("b", "t"),
("c", "u"),
("c", "v"),
],
},
{
"skiprows": 3,
"names": [
_TestTuple("a", "q"),
_TestTuple("a", "r"),
_TestTuple("a", "s"),
_TestTuple("b", "t"),
_TestTuple("c", "u"),
_TestTuple("c", "v"),
],
},
],
)
def test_header_multi_index_common_format1(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
),
)
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"kwargs",
[
{"header": [0, 1]},
{
"skiprows": 2,
"names": [
("a", "q"),
("a", "r"),
("a", "s"),
("b", "t"),
("c", "u"),
("c", "v"),
],
},
{
"skiprows": 2,
"names": [
_TestTuple("a", "q"),
_TestTuple("a", "r"),
_TestTuple("a", "s"),
_TestTuple("b", "t"),
_TestTuple("c", "u"),
_TestTuple("c", "v"),
],
},
],
)
def test_header_multi_index_common_format2(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
),
)
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"kwargs",
[
{"header": [0, 1]},
{
"skiprows": 2,
"names": [
("a", "q"),
("a", "r"),
("a", "s"),
("b", "t"),
("c", "u"),
("c", "v"),
],
},
{
"skiprows": 2,
"names": [
_TestTuple("a", "q"),
_TestTuple("a", "r"),
_TestTuple("a", "s"),
_TestTuple("b", "t"),
_TestTuple("c", "u"),
_TestTuple("c", "v"),
],
},
],
)
def test_header_multi_index_common_format3(all_parsers, kwargs):
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=["one", "two"],
columns=MultiIndex.from_tuples(
[("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
),
)
expected = expected.reset_index(drop=True)
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), index_col=None, **kwargs)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
def test_header_multi_index_common_format_malformed1(all_parsers):
parser = all_parsers
expected = DataFrame(
np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),
index=Index([1, 7]),
columns=MultiIndex(
levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]],
codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=["a", "q"],
),
)
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
@skip_pyarrow
def test_header_multi_index_common_format_malformed2(all_parsers):
parser = all_parsers
expected = DataFrame(
np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),
index=Index([1, 7]),
columns=MultiIndex(
levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]],
codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
names=[None, "q"],
),
)
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
@skip_pyarrow
def test_header_multi_index_common_format_malformed3(all_parsers):
parser = all_parsers
expected = DataFrame(
np.array([[3, 4, 5, 6], [9, 10, 11, 12]], dtype="int64"),
index=MultiIndex(levels=[[1, 7], [2, 8]], codes=[[0, 1], [0, 1]]),
columns=MultiIndex(
levels=[["a", "b", "c"], ["s", "t", "u", "v"]],
codes=[[0, 1, 2, 2], [0, 1, 2, 3]],
names=[None, "q"],
),
)
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = parser.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
@skip_pyarrow
def test_header_multi_index_blank_line(all_parsers):
# GH 40442
parser = all_parsers
data = [[None, None], [1, 2], [3, 4]]
columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")])
expected = DataFrame(data, columns=columns)
data = "a,b\nA,B\n,\n1,2\n3,4"
result = parser.read_csv(StringIO(data), header=[0, 1])
tm.assert_frame_equal(expected, result)
@skip_pyarrow
@pytest.mark.parametrize(
"data,header", [("1,2,3\n4,5,6", None), ("foo,bar,baz\n1,2,3\n4,5,6", 0)]
)
def test_header_names_backward_compat(all_parsers, data, header):
# see gh-2539
parser = all_parsers
expected = parser.read_csv(StringIO("1,2,3\n4,5,6"), names=["a", "b", "c"])
result = parser.read_csv(StringIO(data), names=["a", "b", "c"], header=header)
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize("kwargs", [{}, {"index_col": False}])
def test_read_only_header_no_rows(all_parsers, kwargs):
# See gh-7773
parser = all_parsers
expected = DataFrame(columns=["a", "b", "c"])
result = parser.read_csv(StringIO("a,b,c"), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,names",
[
({}, [0, 1, 2, 3, 4]),
({"prefix": "X"}, ["X0", "X1", "X2", "X3", "X4"]),
(
{"names": ["foo", "bar", "baz", "quux", "panda"]},
["foo", "bar", "baz", "quux", "panda"],
),
],
)
def test_no_header(all_parsers, kwargs, names):
parser = all_parsers
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], columns=names
)
result = parser.read_csv(StringIO(data), header=None, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("header", [["a", "b"], "string_header"])
def test_non_int_header(all_parsers, header):
# see gh-16338
msg = "header must be integer or list of integers"
data = """1,2\n3,4"""
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=header)
@skip_pyarrow
def test_singleton_header(all_parsers):
# see gh-7757
data = """a,b,c\n0,1,2\n1,2,3"""
parser = all_parsers
expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]})
result = parser.read_csv(StringIO(data), header=[0])
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize(
"data,expected",
[
(
"A,A,A,B\none,one,one,two\n0,40,34,0.1",
DataFrame(
[[0, 40, 34, 0.1]],
columns=MultiIndex.from_tuples(
[("A", "one"), ("A", "one.1"), ("A", "one.2"), ("B", "two")]
),
),
),
(
"A,A,A,B\none,one,one.1,two\n0,40,34,0.1",
DataFrame(
[[0, 40, 34, 0.1]],
columns=MultiIndex.from_tuples(
[("A", "one"), ("A", "one.1"), ("A", "one.1.1"), ("B", "two")]
),
),
),
(
"A,A,A,B,B\none,one,one.1,two,two\n0,40,34,0.1,0.1",
DataFrame(
[[0, 40, 34, 0.1, 0.1]],
columns=MultiIndex.from_tuples(
[
("A", "one"),
("A", "one.1"),
("A", "one.1.1"),
("B", "two"),
("B", "two.1"),
]
),
),
),
],
)
def test_mangles_multi_index(all_parsers, data, expected):
# see gh-18062
parser = all_parsers
result = parser.read_csv(StringIO(data), header=[0, 1])
tm.assert_frame_equal(result, expected)
@skip_pyarrow
@pytest.mark.parametrize("index_col", [None, [0]])
@pytest.mark.parametrize(
"columns", [None, (["", "Unnamed"]), (["Unnamed", ""]), (["Unnamed", "NotUnnamed"])]
)
def test_multi_index_unnamed(all_parsers, index_col, columns):
# see gh-23687
#
# When specifying a multi-index header, make sure that
# we don't error just because one of the rows in our header
# has ALL column names containing the string "Unnamed". The
# correct condition to check is whether the row contains
# ALL columns that did not have names (and instead were given
# placeholder ones).
parser = all_parsers
header = [0, 1]
if index_col is None:
data = ",".join(columns or ["", ""]) + "\n0,1\n2,3\n4,5\n"
else:
data = ",".join([""] + (columns or ["", ""])) + "\n,0,1\n0,2,3\n1,4,5\n"
if columns is None:
msg = (
r"Passed header=\[0,1\] are too "
r"many rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=header, index_col=index_col)
else:
result = parser.read_csv(StringIO(data), header=header, index_col=index_col)
exp_columns = []
for i, col in enumerate(columns):
if not col: # Unnamed.
col = f"Unnamed: {i if index_col is None else i + 1}_level_0"
exp_columns.append(col)
columns = MultiIndex.from_tuples(zip(exp_columns, ["0", "1"]))
expected =
|
DataFrame([[2, 3], [4, 5]], columns=columns)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""Test evaluator."""
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sktime.benchmarking.evaluation import Evaluator
from sktime.benchmarking.metrics import PairwiseMetric
from sktime.benchmarking.results import RAMResults
from sktime.series_as_features.model_selection import PresplitFilesCV
def dummy_results():
"""Results that are dummy."""
results = RAMResults()
results.cv = PresplitFilesCV()
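    # Two strategies ("alg1", "alg2") over two datasets give the Evaluator something
    # to aggregate: alg1 predicts perfectly on both datasets, alg2 is always wrong on dataset1.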
results.save_predictions(
strategy_name="alg1",
dataset_name="dataset1",
index=np.array([1, 2, 3, 4]),
y_true=np.array([1, 1, 1, 1]),
y_pred=np.array([1, 1, 1, 1]),
y_proba=None,
cv_fold=0,
train_or_test="test",
fit_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
fit_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
predict_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
predict_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
)
results.save_predictions(
strategy_name="alg1",
dataset_name="dataset2",
index=np.array([1, 2, 3, 4]),
y_true=np.array([0, 0, 0, 0]),
y_pred=np.array([0, 0, 0, 0]),
y_proba=None,
cv_fold=0,
train_or_test="test",
fit_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
fit_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
predict_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
predict_estimator_end_time=pd.to_datetime(1605268801, unit="ms"),
)
results.save_predictions(
strategy_name="alg2",
dataset_name="dataset1",
index=np.array([1, 2, 3, 4]),
y_true=np.array([1, 1, 1, 1]),
y_pred=np.array([0, 0, 0, 0]),
y_proba=None,
cv_fold=0,
train_or_test="test",
fit_estimator_start_time=pd.to_datetime(1605268800, unit="ms"),
fit_estimator_end_time=
|
pd.to_datetime(1605268801, unit="ms")
|
pandas.to_datetime
|
"""
This module encodes a dash app for non-experts to examine
patterns in bird sightings and air quality in Oregon.
"""
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
from urllib.request import urlopen
import json
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
import plotly.express as px
from flask_caching import Cache
from phoenix.code.appfunctions import subset_date, subset_air_quality
# Initialize Dash App
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
colors = {
'background': '#FFFFFF',
'text': '#000000'
}
# Initialize Cache
CACHE_CONFIG = {
'CACHE_TYPE': 'filesystem',
'CACHE_DIR': 'cache',
'CACHE_DEFAULT_TIMEOUT': 180
}
cache = Cache(app.server, config=CACHE_CONFIG)
# Read in and clean data
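# Both datasets below are restricted to the summer months (June-September)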
months = [6, 7, 8, 9]
@cache.cached(key_prefix='county_map')
def get_county_map():
"""
Fetches the Oregon County geojson file for mapping
Args: None
Returns:
counties1 (geojson): Oregon county map file from repo
"""
print("Retrieving Oregon County Map...")
with urlopen('https://raw.githubusercontent.com/emilysellinger/Phoenix/main/phoenix/data/Oregon_counties_map.geojson') as response:# noqa
counties1 = json.load(response)
return counties1
counties = get_county_map()
@cache.cached(key_prefix='aq_data')
def get_aq_data():
"""
Fetches the Oregon air quality data and filters data based on
relevant month and valid air quality entry
Args: None
Returns:
aq1 (pd dataframe): Cleaned air quality data from repo
"""
print("Retrieving Air Quality County Data...")
aq1 = pd.read_csv('phoenix/data/OR_DailyAQ_byCounty.csv')
aq1['Date'] = pd.to_datetime(aq1['Date'])
aq1 = aq1.loc[aq1['Date'].dt.month.isin(months)]
aq1 = aq1[aq1['Avg_PM2.5'].notna()]
return aq1
aq = get_aq_data()
@cache.cached(key_prefix='bird_data')
def get_bird_data():
"""
Fetches the full Oregon eBird dataset and filters data based on
relevant columns and months
Args: None
Returns:
bird1 (pd dataframe): Cleaned eBird data from repo
"""
print("Retrieving Bird Data...")
bird1 =
|
pd.read_csv("phoenix/data/ebird_app_data.csv")
|
pandas.read_csv
|
import ccxt
import sys
import random
import time
import datetime
import json
import os
import numpy as np
import pandas as pd
from enum import Enum
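# Signal thresholds: how far the signal must move before entering a position from
# neutral, or before exiting/reversing an existing long/short position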
buy_from_neutral = 0.0025
sell_from_long = -0.001
buy_from_short = +0.001
sell_from_neutral = -0.0025
timeframe = '1m'
base = 'BTC/USDT'
pairs = ['DASH/USDT']
size = 1
live_trading = True
bot_name = 'ext_DASH'
if sys.platform == 'win32':
excel_file = bot_name +'_data.xlsx'
position_file = bot_name +'_position.json'
filename = bot_name +'_signals.csv'
else:
excel_file = '/var/www/html/'+ bot_name +'_mkt_data.xlsx'
position_file = '/var/www/html/'+ bot_name +'_position.json'
filename = '/var/www/html/'+ bot_name +'_signals.csv'
class TradeAction(Enum):
BUY_FROM_NEUTRAL = 1
SELL_FROM_NEUTRAL = 2
SELL_FROM_LONG = 3
BUY_FROM_SHORT = 4
REMAIN_NEUTRAL = 5
def action(signal):
if (signal > buy_from_neutral):
return "long"
elif (signal < sell_from_neutral):
return "short"
else:
return "none"
def get_time(time_str):
return time.strftime("%d %b %Y %H:%M:%S",time.localtime(time_str/1000))
def buy_at(trade, price):
    # Report the price only for long trades; otherwise return an empty string
    # (the intent of the original, unreachable except branch)
    return price if trade == "long" else ""
def sell_at(trade, price):
    # Report the price only for short trades; otherwise return an empty string
    return price if trade == "short" else ""
def rsi(values):
up = values[values>0].mean()
down = -1*values[values<0].mean()
return 100 * up / (up + down)
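# rsi() maps the balance of average gains vs. average losses in a window of price
# changes onto a 0-100 scale. Hypothetical usage (variable names assumed, not part
# of this script):
#   changes = df_ind_ohlcv['close'].diff().dropna().tail(14)
#   momentum = rsi(changes)  # ~14-period RSI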
def loadExchanges():
exchanges = []
binance = ccxt.binance()
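    # Placeholder credentials: replace with a real API key/secret before enabling live trading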
binance.apiKey = 'API key'
binance.secret = 'API secret'
exchanges.append(binance)
return exchanges
exchanges = loadExchanges()
with open(position_file) as json_file:
pos = json.load(json_file)
position = pos['position']
for e in exchanges:
for pair in pairs:
#holdings = e.fetchBalance()['free']
#print("BTC: ",holdings['BTC'])
#print("USDT: ", holdings['USDT'])
ind_trades = e.fetch_trades(base)
df_ind_trades = pd.DataFrame.from_records(ind_trades, columns = ['amount' , 'cost', 'price','side','timestamp','id','symbol'])
df_ind_trades['trade_time'] = df_ind_trades.apply(lambda x: get_time(x['timestamp']), axis=1)
#Get Data
#print('get data')
x = datetime.datetime.now()
datestr = str(x.strftime('%Y/%m/%d %H:%M:%S'))
orderbook = e.fetch_order_book (pair)
df_bid_orders = pd.DataFrame(orderbook['bids'],columns = ['price','units'])
df_ask_orders = pd.DataFrame(orderbook['asks'],columns = ['price','units'])
bid_sum = df_bid_orders[df_bid_orders['price'] > float(orderbook['bids'][0][0])*0.999]['units'].sum()
ask_sum = df_ask_orders[df_ask_orders['price'] < float(orderbook['asks'][0][0])*1.001]['units'].sum()
ask = float(orderbook['asks'][0][0])
bid = float(orderbook['bids'][0][0])
#df_bid_orders['EWMA_90'] = df_bid_orders['units'].ewm(com=0.9).mean()
#df_ask_orders['EWMA_90'] = df_ask_orders['units'].ewm(com=0.9).mean()
ohlcv_1d = e.fetch_ohlcv (pair, '1d')
ind_ohlcv = e.fetch_ohlcv ('BTC/USDT', timeframe)
ohlcv = e.fetch_ohlcv (pair, timeframe)
trades = e.fetch_trades(pair)
#my_trades = e.fetch_my_trades(pair)
#df_my_trades = pd.DataFrame.from_records(my_trades, columns = ['amount' , 'cost', 'price','side','timestamp','id','symbol'])
#df_my_trades['trade_time'] = df_my_trades.apply(lambda x: get_time(x['timestamp']), axis=1)
df_ind_ohlcv = pd.DataFrame.from_records(ind_ohlcv, columns = ['timestamp' , 'open', 'high','low','close','volume'])
df_ind_ohlcv['trade_time'] = df_ind_ohlcv.apply(lambda x: get_time(x['timestamp']), axis=1)
df_trades =
|
pd.DataFrame.from_records(trades, columns = ['amount' , 'cost', 'price','side','timestamp','id','symbol'])
|
pandas.DataFrame.from_records
|
import inspect, os
import pandas as pd
import time
from db import extract
from db import log
from forecast import compute
from forecast import util
"""
Demographic and Economic Forecasting Model
a simulation using rate versions in model_config.yml input config file
rate versions refer to original data source
"""
# Housekeeping stuff
# measure script time
start_time = time.time()
# change to current directory to find .yml input config file
full_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
os.chdir(os.path.dirname(full_path))
# set console display to show MultiIndex for every row
|
pd.set_option('display.multi_sparse', False)
|
pandas.set_option
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
lreshape,
melt,
wide_to_long,
)
import pandas._testing as tm
class TestMelt:
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df["id1"] = (self.df["A"] > 0).astype(np.int64)
self.df["id2"] = (self.df["B"] > 0).astype(np.int64)
self.var_name = "var"
self.value_name = "val"
self.df1 = DataFrame(
[
[1.067683, -1.110463, 0.20867],
[-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361],
]
)
self.df1.columns = [list("ABC"), list("abc")]
self.df1.columns.names = ["CAP", "low"]
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ["variable", "value"]
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(), melt(self.df))
tm.assert_frame_equal(
self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]),
melt(self.df, id_vars=["id1", "id2"], value_vars=["A", "B"]),
)
tm.assert_frame_equal(
self.df.melt(var_name=self.var_name, value_name=self.value_name),
melt(self.df, var_name=self.var_name, value_name=self.value_name),
)
tm.assert_frame_equal(self.df1.melt(col_level=0), melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ["variable", "value"]
result1 = self.df.melt(id_vars=["id1"])
assert result1.columns.tolist() == ["id1", "variable", "value"]
result2 = self.df.melt(id_vars=["id1", "id2"])
assert result2.columns.tolist() == ["id1", "id2", "variable", "value"]
def test_value_vars(self):
result3 = self.df.melt(id_vars=["id1", "id2"], value_vars="A")
assert len(result3) == 10
result4 = self.df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"])
expected4 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", "value"],
)
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B")))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame(
{
("A", "a"): self.df1[("A", "a")],
"CAP": ["B"] * len(self.df1),
"low": ["b"] * len(self.df1),
"value": self.df1[("B", "b")],
},
columns=[("A", "a"), "CAP", "low", "value"],
)
result = self.df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"id_vars, value_vars, col_level, expected",
[
(
["A"],
["B"],
0,
DataFrame(
{
"A": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"CAP": {0: "B", 1: "B", 2: "B"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
(
["a"],
["b"],
1,
DataFrame(
{
"a": {0: 1.067683, 1: -1.321405, 2: -0.807333},
"low": {0: "b", 1: "b", 2: "b"},
"value": {0: -1.110463, 1: 0.368915, 2: 0.08298},
}
),
),
],
)
def test_single_vars_work_with_multiindex(
self, id_vars, value_vars, col_level, expected
):
result = self.df1.melt(id_vars, value_vars, col_level=col_level)
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ("A", "a")
list_a = [tuple_a]
tuple_b = ("B", "b")
list_b = [tuple_b]
msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex"
for id_vars, value_vars in (
(tuple_a, list_b),
(list_a, tuple_b),
(tuple_a, tuple_b),
):
with pytest.raises(ValueError, match=msg):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ["var", "value"]
result6 = self.df.melt(id_vars=["id1"], var_name=self.var_name)
assert result6.columns.tolist() == ["id1", "var", "value"]
result7 = self.df.melt(id_vars=["id1", "id2"], var_name=self.var_name)
assert result7.columns.tolist() == ["id1", "id2", "var", "value"]
result8 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", var_name=self.var_name
)
assert result8.columns.tolist() == ["id1", "id2", "var", "value"]
result9 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=self.var_name
)
expected9 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
"value": (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, "value"],
)
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ["variable", "val"]
result11 = self.df.melt(id_vars=["id1"], value_name=self.value_name)
assert result11.columns.tolist() == ["id1", "variable", "val"]
result12 = self.df.melt(id_vars=["id1", "id2"], value_name=self.value_name)
assert result12.columns.tolist() == ["id1", "id2", "variable", "val"]
result13 = self.df.melt(
id_vars=["id1", "id2"], value_vars="A", value_name=self.value_name
)
assert result13.columns.tolist() == ["id1", "id2", "variable", "val"]
result14 = self.df.melt(
id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=self.value_name
)
expected14 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
"variable": ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", "variable", self.value_name],
)
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name, value_name=self.value_name)
assert result15.columns.tolist() == ["var", "val"]
result16 = self.df.melt(
id_vars=["id1"], var_name=self.var_name, value_name=self.value_name
)
assert result16.columns.tolist() == ["id1", "var", "val"]
result17 = self.df.melt(
id_vars=["id1", "id2"], var_name=self.var_name, value_name=self.value_name
)
assert result17.columns.tolist() == ["id1", "id2", "var", "val"]
result18 = self.df.melt(
id_vars=["id1", "id2"],
value_vars="A",
var_name=self.var_name,
value_name=self.value_name,
)
assert result18.columns.tolist() == ["id1", "id2", "var", "val"]
result19 = self.df.melt(
id_vars=["id1", "id2"],
value_vars=["A", "B"],
var_name=self.var_name,
value_name=self.value_name,
)
expected19 = DataFrame(
{
"id1": self.df["id1"].tolist() * 2,
"id2": self.df["id2"].tolist() * 2,
self.var_name: ["A"] * 10 + ["B"] * 10,
self.value_name: (self.df["A"].tolist() + self.df["B"].tolist()),
},
columns=["id1", "id2", self.var_name, self.value_name],
)
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = "foo"
result20 = df20.melt()
assert result20.columns.tolist() == ["foo", "value"]
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level="CAP")
assert res1.columns.tolist() == ["CAP", "value"]
assert res2.columns.tolist() == ["CAP", "value"]
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ["CAP", "low", "value"]
@pytest.mark.parametrize(
"col",
[
pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")),
pd.Series(["a", "b", "c", "a", "d"], dtype="category"),
pd.Series([0, 1, 0, 0, 0]),
],
)
def test_pandas_dtypes(self, col):
# GH 15785
df = DataFrame(
{"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col}
)
expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True)
result = melt(
df, id_vars=["klass", "col"], var_name="attribute", value_name="value"
)
expected = DataFrame(
{
0: list(range(5)) * 2,
1: pd.concat([col] * 2, ignore_index=True),
2: ["attr1"] * 5 + ["attr2"] * 5,
3: expected_value,
}
)
expected.columns = ["klass", "col", "attribute", "value"]
tm.assert_frame_equal(result, expected)
def test_preserve_category(self):
# GH 15853
data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])})
result = melt(data, ["B"], ["A"])
expected = DataFrame(
{"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_missing_columns_raises(self):
# GH-23575
# This test is to ensure that pandas raises an error if melting is
# attempted with column names absent from the dataframe
# Generate data
df = DataFrame(np.random.randn(5, 4), columns=list("abcd"))
# Try to melt with missing `value_vars` column name
msg = "The following '{Var}' are not present in the DataFrame: {Col}"
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['C'\\]")
):
df.melt(["a", "b"], ["C", "d"])
# Try to melt with missing `id_vars` column name
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['A'\\]")):
df.melt(["A", "b"], ["c", "d"])
# Multiple missing
with pytest.raises(
KeyError,
match=msg.format(Var="id_vars", Col="\\['not_here', 'or_there'\\]"),
):
df.melt(["a", "b", "not_here", "or_there"], ["c", "d"])
# Multiindex melt fails if column is missing from multilevel melt
multi = df.copy()
multi.columns = [list("ABCD"), list("abcd")]
with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['E'\\]")):
multi.melt([("E", "a")], [("B", "b")])
# Multiindex fails if column is missing from single level melt
with pytest.raises(
KeyError, match=msg.format(Var="value_vars", Col="\\['F'\\]")
):
multi.melt(["A"], ["F"], col_level=0)
def test_melt_mixed_int_str_id_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]})
result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"])
expected = DataFrame(
{0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]}
)
tm.assert_frame_equal(result, expected)
def test_melt_mixed_int_str_value_vars(self):
# GH 29718
df = DataFrame({0: ["foo"], "a": ["bar"]})
result = melt(df, value_vars=[0, "a"])
expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]})
tm.assert_frame_equal(result, expected)
def test_ignore_index(self):
# GH 17440
df = DataFrame({"foo": [0], "bar": [1]}, index=["first"])
result = melt(df, ignore_index=False)
expected = DataFrame(
{"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"]
)
tm.assert_frame_equal(result, expected)
def test_ignore_multiindex(self):
# GH 17440
index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")], names=["baz", "foobar"]
)
df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.MultiIndex.from_tuples(
[("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"]
)
expected = DataFrame(
{"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_ignore_index_name_and_type(self):
# GH 17440
index = pd.Index(["foo", "bar"], dtype="category", name="baz")
df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index)
result = melt(df, ignore_index=False)
expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz")
expected = DataFrame(
{"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]},
index=expected_index,
)
tm.assert_frame_equal(result, expected)
def test_melt_with_duplicate_columns(self):
# GH#41951
df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"])
result = df.melt(id_vars=["a"], value_vars=["b"])
expected = DataFrame(
[["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"]
)
tm.assert_frame_equal(result, expected)
class TestLreshape:
def test_pairs(self):
data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [1766, 3301, 1454, 3139, 4133],
"id": [101, 102, 103, 104, 105],
"sex": ["Male", "Female", "Female", "Female", "Female"],
"visitdt1": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
],
"visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"],
"visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"],
"wt1": [1823, 3338, 1549, 3298, 4306],
"wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0],
"wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0],
}
df = DataFrame(data)
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 4)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
result = lreshape(df, spec)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
1454,
3139,
4133,
1766,
3139,
4133,
],
"id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
1892.0,
3338.0,
4575.0,
2293.0,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
],
"id": [
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
np.nan,
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
np.nan,
np.nan,
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
np.nan,
1892.0,
3338.0,
4575.0,
2293.0,
np.nan,
np.nan,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = lreshape(df, spec, dropna=False, label="foo")
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 3)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
msg = "All column lists must be same length"
with pytest.raises(ValueError, match=msg):
lreshape(df, spec)
class TestWideToLong:
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A1970": {0: "a", 1: "b", 2: "c"},
"A1980": {0: "d", 1: "e", 2: "f"},
"B1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_stubs(self):
# GH9204 wide_to_long call should not modify 'stubs' list
df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ["id", "inc1", "inc2", "edu1", "edu2"]
stubs = ["inc", "edu"]
wide_to_long(df, stubs, i="id", j="age")
assert stubs == ["inc", "edu"]
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A.1970": {0: "a", 1: "b", 2: "c"},
"A.1980": {0: "d", 1: "e", 2: "f"},
"B.1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B.1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A": ["a", "b", "c", "d", "e", "f"],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(result, expected)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = DataFrame(
{
"A(quarterly)1970": {0: "a", 1: "b", 2: "c"},
"A(quarterly)1980": {0: "d", 1: "e", 2: "f"},
"B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7},
"B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1},
"X": dict(zip(range(3), x)),
}
)
df["id"] = df.index
exp_data = {
"X": x.tolist() + x.tolist(),
"A(quarterly)": ["a", "b", "c", "d", "e", "f"],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2],
}
expected = DataFrame(exp_data)
expected = expected.set_index(["id", "year"])[
["X", "A(quarterly)", "B(quarterly)"]
]
result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year")
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
"""Copyright (c) Facebook, Inc. and its affiliates."""
# pylint: disable=unused-argument,too-many-statements,unused-variable
import functools
import glob
import os
from collections import defaultdict
from pathlib import Path
from typing import List, Optional, Union
import altair as alt
import altair_saver
import numpy as np
import pandas as pd
import typer
from altair.expr import datum
from functional import pseq, seq
from pedroai.io import (
read_json,
read_jsonlines,
requires_file,
requires_files,
safe_file,
)
from pedroai.math import to_precision
from rich.console import Console
from leaderboard.config import conf
from leaderboard.data import (
IrtParsed,
LeaderboardPredictions,
load_squad_submissions,
load_squad_v2,
)
alt.data_transformers.disable_max_rows()
PAPERS_ROOT = Path(os.environ.get("PAPERS_ROOT", "./"))
AUTO_FIG = PAPERS_ROOT / "auto_fig"
COMMIT_AUTO_FIGS = PAPERS_ROOT / "commit_auto_figs"
BASE_SIZE = 150
plot_app = typer.Typer()
console = Console()
def save_chart(chart: alt.Chart, base_path: Union[str, Path], filetypes: List[str]):
if isinstance(base_path, Path):
base_path = str(base_path)
for t in filetypes:
path = base_path + "." + t
if t in ("svg", "pdf"):
method = "node"
else:
method = None
console.log(f"Saving to: {path}")
altair_saver.save(chart, safe_file(path), method=method)
def generate_ablation_files():
ablation_files = {}
for path in glob.glob("data/linear/**/**/**/report.json"):
fields = path.split("/")
irt_family = fields[2]
irt_type = fields[3]
features = fields[4]
if irt_type in ("1PL", "2PL"):
continue
ablation_files[(irt_family, irt_type, features)] = Path(path)
return ablation_files
PLOTS = {}
def register_plot(name: str):
def decorator(func):
PLOTS[name] = func
return func
return decorator
ABLATION_FILES = generate_ablation_files()
def generate_irt_files():
irt_files = {}
for model_type, evaluations in conf["irt"]["squad"]["dev"]["pyro"].items():
for eval_type in ("full", "heldout"):
irt_files[(model_type, eval_type)] = Path(evaluations[eval_type]) / "report.json"
return irt_files
IRT_FILES = generate_irt_files()
def init_score():
return {"tie": 0, "win": 0, "loss": 0}
def run_stats_tournament(fold: str):
test_results = {}
for test in ["mcnemar", "see", "sem", "student_t", "wilcoxon"]:
stats = read_json(f"data/stats/fold={fold}/sampling=random/percent=100/{test}.json")
match_results = defaultdict(init_score)
alpha = 0.01
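        # A pairwise comparison counts as a win/loss only when the test is
        # significant at alpha; otherwise both models record a tie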
for r in stats["results"]:
model_a = r["model_a"]
model_b = r["model_b"]
if r["pvalue"] is not None and r["pvalue"] < alpha:
if r["score_a"] > r["score_b"]:
match_results[model_a]["win"] += 1
match_results[model_b]["loss"] += 1
else:
match_results[model_a]["loss"] += 1
match_results[model_b]["win"] += 1
else:
match_results[model_a]["tie"] += 1
match_results[model_b]["tie"] += 1
test_results[test] = match_results
return test_results
@register_plot("rank_correlation_table")
@requires_file(conf["squad"]["dev_to_test"])
def rank_correlation_table(filetypes: List[str], commit: bool = False, include_test: bool = True):
irt_model = "3PL"
dev_irt_params = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["dev"]["pyro"][irt_model]["full"]) / "parameters.json"
)
dev_predictions = LeaderboardPredictions.parse_file(
conf["squad"]["submission_predictions"]["dev"]
)
dev_id_to_subject = load_squad_submissions(dev_predictions)
console.log("N Dev IRT", len(dev_irt_params.model_stats))
stats_results = run_stats_tournament("dev")
mcnemar_results = stats_results["mcnemar"]
see_results = stats_results["see"]
student_t_results = stats_results["student_t"]
sem_results = stats_results["sem"]
if include_test:
mapping = read_json(conf["squad"]["dev_to_test"])
dev_to_test = mapping["dev_to_test"]
test_irt_params = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["test"]["pyro"][irt_model]["full"]) / "parameters.json"
)
console.log("N Test IRT", len(test_irt_params.model_stats))
test_stats_results = run_stats_tournament("test")
test_mcnemar_results = test_stats_results["mcnemar"]
test_see_results = test_stats_results["see"]
test_student_t_results = test_stats_results["student_t"]
test_sem_results = test_stats_results["sem"]
else:
mapping = None
dev_to_test = None
test_irt_params = None
test_stats_results = None
test_mcnemar_results = None
test_see_results = None
test_student_t_results = None
test_sem_results = None
rows = []
n_test = 0
n_dev = 0
for subject_id in dev_id_to_subject.keys():
subject = dev_id_to_subject[subject_id]
entry = {
"subject_id": subject_id,
"name": subject["name"],
"dev_em": subject["dev_em"],
"test_em": subject["test_em"],
"dev_skill": dev_irt_params.model_stats[subject_id].skill,
# "dev_mcnemar": mcnemar_results[subject_id]["win"],
# "dev_see": see_results[subject_id]["win"],
# "dev_student_t": student_t_results[subject_id]["win"],
# "dev_sem": sem_results[subject_id]["win"],
}
n_dev += 1
if include_test:
if subject_id in dev_to_test:
test_subject_id = dev_to_test[subject_id]
if test_subject_id in test_irt_params.model_stats:
entry["test_skill"] = test_irt_params.model_stats[test_subject_id].skill
# entry["test_mcnemar"] = test_mcnemar_results[test_subject_id]["win"]
# entry["test_see"] = test_see_results[test_subject_id]["win"]
# entry["test_student_t"] = test_student_t_results[test_subject_id][
# "win"
# ]
# entry["test_sem"] = test_sem_results[test_subject_id]["win"]
n_test += 1
rows.append(entry)
console.log("N Dev", n_dev, "N Test", n_test)
df = pd.DataFrame(rows).dropna(axis=0)
console.log(df)
name_mapping = {
"dev_em": r"EM$_{\text{dev}}$",
"test_em": r"EM$_{\text{test}}$",
"dev_skill": r"Ability$_{\text{dev}}$",
"test_skill": r"Ability$_{\text{test}}$",
}
correlations = df.corr(method="kendall")
correlations.to_pickle("/tmp/leaderboard_correlations.pickle")
console.log(correlations)
print(
correlations.applymap(lambda n: f"${to_precision(n, 3)}$")
.rename(columns=name_mapping, index=name_mapping)
.to_latex(column_format="l" + len(name_mapping) * "r", escape=False)
)
@register_plot("sampling_stability")
def sample_stability_plot(filetypes: List[str], commit: bool = False):
input_dir = Path(conf["stability"]["sampling"])
random_df = pd.read_json(input_dir / "random_df.json")
irt_df = pd.read_json(input_dir / "irt_df.json")
info_df = pd.read_json(input_dir / "info_df.json")
method_names = {
"dev_high_disc_to_test": "High Discrimination",
"dev_high_diff_to_test": "High Difficulty",
"dev_high_disc_diff_to_test": "High Disc + Diff",
"dev_info_to_test": "High Information",
"dev_random_to_test": "Random",
}
def format_df(dataframe):
return dataframe.assign(
sampling_method=dataframe["variable"].map(lambda v: method_names[v])
)
x_scale = alt.X("trial_size", title="Development Set Sample Size", scale=alt.Scale(type="log"))
y_scale = alt.Scale(zero=False)
color_scale = alt.Color(
"sampling_method",
title="Sampling Method",
legend=alt.Legend(orient="bottom-right", fillColor="white", padding=5, strokeColor="gray"),
sort=[
"High Disc + Diff",
"High Information",
"High Discrimination",
"High Difficulty",
"Random",
],
)
random_line = (
alt.Chart(format_df(random_df))
.mark_line()
.encode(
x=x_scale,
y=alt.Y("mean(value)", scale=y_scale, title="Correlation to Test Rank"),
color=color_scale,
)
)
random_band = (
alt.Chart(format_df(random_df))
.mark_errorband(extent="ci")
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
determ_df = pd.concat([irt_df, info_df])
irt_line = (
alt.Chart(format_df(determ_df))
.mark_line()
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
font_size = 18
chart = (
(random_band + random_line + irt_line)
.configure_axis(labelFontSize=font_size, titleFontSize=font_size)
.configure_legend(
labelFontSize=font_size, titleFontSize=font_size, symbolLimit=0, labelLimit=0,
)
.configure_header(labelFontSize=font_size)
.configure(padding=0)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "sampling_rank", filetypes)
else:
save_chart(chart, AUTO_FIG / "sampling_rank", filetypes)
@register_plot("cat_sampling_stability")
def cat_sample_stability_plot(filetypes: List[str], commit: bool = False):
input_dir = Path(conf["stability"]["cat_sampling"])
random_df = pd.read_json(input_dir / "random_df.json")
irt_df = pd.read_json(input_dir / "irt_df.json")
info_df = pd.read_json(input_dir / "info_df.json")
method_names = {
"dev_high_disc_to_test": "High Discrimination",
"dev_high_diff_to_test": "High Difficulty",
"dev_high_disc_diff_to_test": "High Disc + Diff",
"dev_info_to_test": "High Information",
"dev_random_to_test": "Random",
}
def format_df(dataframe):
return dataframe.assign(
sampling_method=dataframe["variable"].map(lambda v: method_names[v])
)
x_scale = alt.X("trial_size", title="Development Set Sample Size", scale=alt.Scale(type="log"))
y_scale = alt.Scale(zero=False)
color_scale = alt.Color(
"sampling_method",
title="Sampling Method",
legend=alt.Legend(orient="bottom-right", fillColor="white", padding=5, strokeColor="gray"),
sort=[
"High Information",
"High Discrimination",
"High Disc + Diff",
"High Difficulty",
"Random",
],
)
random_line = (
alt.Chart(format_df(random_df))
.mark_line()
.encode(
x=x_scale,
y=alt.Y("mean(value)", scale=y_scale, title="Correlation to Test Rank"),
color=color_scale,
)
)
random_band = (
alt.Chart(format_df(random_df))
.mark_errorband(extent="ci")
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
determ_df = pd.concat([irt_df, info_df])
irt_line = (
alt.Chart(format_df(determ_df))
.mark_line()
.encode(x=x_scale, y=alt.Y("value", title="", scale=y_scale), color=color_scale)
)
font_size = 18
chart = (
(random_band + random_line + irt_line)
.configure_axis(labelFontSize=font_size, titleFontSize=font_size)
.configure_legend(
labelFontSize=font_size, titleFontSize=font_size, symbolLimit=0, labelLimit=0,
)
.configure_header(labelFontSize=font_size)
.configure(padding=0)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "cat_sampling_rank", filetypes)
else:
save_chart(chart, AUTO_FIG / "cat_sampling_rank", filetypes)
def label_experiment(label):
if label.startswith("test_"):
return "Dev Sample to Test"
else:
return "Dev Sample to Dev Sample"
def label_sig(fold: str):
if fold == "dev":
return "Dev Sample to Dev Sample"
elif fold == "test":
return "Dev Sample to Test"
else:
raise ValueError(f"Invalid fold: {fold}")
@functools.lru_cache()
def load_test_irt():
test_irt_parsed = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["test"]["pyro"]["3PL"]["full"]) / "parameters.json"
)
test_preds = LeaderboardPredictions.parse_file(conf["squad"]["submission_predictions"]["test"])
mapping = read_json(conf["squad"]["dev_to_test"])
dev_to_test = mapping["dev_to_test"]
def get_test_irt(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_irt_parsed.model_stats:
return test_irt_parsed.model_stats[test_id].skill
else:
return None
else:
return None
def get_test_classical(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_preds.model_scores:
return test_preds.model_scores[test_id]["exact_match"]
else:
return None
else:
return None
return get_test_irt, get_test_classical
def rank_compute_bootstrap_ci(data_path: str, n_trials: int = 1000, fold: str = "dev"):
"""Given stability experiment, compute bootstrapped
confidence intervals, and check if correlations are above 95%
interval.
Args:
data_path (str): Path to dataframe stored in feather format with experiment
"""
df = pd.read_feather(data_path)
size = df["size"].iloc[0]
trial_id = df["trial_id"].iloc[0]
if fold == "test":
get_test_irt, get_test_classical = load_test_irt()
df["b_irt"] = df["subject_id"].map(get_test_irt)
df["b_classical"] = df["subject_id"].map(get_test_classical)
df = df.dropna(0)
real_corr = df.corr(method="kendall")
# Due to not implementing identifiability, IRT scores may be flipped
# Detect that and adjust as necessary
if real_corr["a_irt"].a_classical < 0:
df["a_irt"] = -df["a_irt"]
if real_corr["b_irt"].b_classical < 0:
df["b_irt"] = -df["b_irt"]
real_corr = df.corr(method="kendall")
corr_diff = real_corr["a_irt"].b_irt - real_corr["a_classical"].b_classical
a_classical_scores = df.a_classical.to_numpy()
a_irt_scores = df.a_irt.to_numpy()
indices = np.arange(0, len(a_classical_scores))
# Build up a distribution of score differences
diff_dist = []
# Simulate a bunch of times
n_subjects = len(a_classical_scores)
for _ in range(n_trials):
# Create a new similar DF, except sample with replacement one set of rankings
# Be sure to keep pairs of irt/classical scores together
sample_indices = np.random.choice(indices, n_subjects, replace=True)
sample_classical = a_classical_scores[sample_indices]
sample_irt = a_irt_scores[sample_indices]
sample_df = pd.DataFrame(
{
"subject_id": df["subject_id"],
# I'm not sure doing replacement is correct
# Also not sure if n=161 is correct, seems odd,
# but I'd be worried if I did only 20 that
# the distribution of differences might be different
"a_classical": sample_classical,
"a_irt": sample_irt,
# Keep one ranking the same
"b_classical": df["b_classical"],
"b_irt": df["b_irt"],
}
)
sample_corr = sample_df.corr(method="kendall")
# Grab correlations
irt_corr = sample_corr.loc["a_irt"].b_irt
classical_corr = sample_corr.loc["a_classical"].b_classical
# Record the difference
diff_dist.append(irt_corr - classical_corr)
diff_df = pd.DataFrame({"diff": diff_dist})
# Two tailed test, so divide by two
alpha = 1 - 0.95
lower, upper = diff_df["diff"].quantile([alpha, 1 - alpha])
# significant = bool(corr_diff < lower or upper < corr_diff)
significant = bool(upper < corr_diff)
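    # One-sided bootstrap p-value: the fraction of resampled differences at least
    # as large as the observed correlation difference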
p_value = 1 - ((diff_df["diff"] < corr_diff).sum() / n_trials)
return {
"significant": significant,
"p_value": float(p_value),
"diff": float(corr_diff),
"irt_corr": float(real_corr["a_irt"].b_irt),
"classical_corr": float(real_corr["a_classical"].b_classical),
"trial_size": int(size),
"trial_id": int(trial_id),
"lower": float(lower),
"upper": float(upper),
"alpha": alpha,
"diff_dist": diff_dist,
}
def process_trial_group(trial_size, trials):
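    # Pool the bootstrap difference distributions from every trial of this size,
    # then score each trial's observed difference against the pooled distribution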
diff_dist = []
for t in trials:
diff_dist.extend(t["diff_dist"])
diff_dist = np.array(diff_dist)
for t in trials:
p_value = 1 - (diff_dist < t["diff"]).mean()
t["total_p_value"] = p_value
yield t
def get_cached_rank_stability_sig(force: bool = False, n_trials: int = 1000):
input_dir = Path(conf["stability"]["ranking"])
output_path = Path(conf["stability"]["ranking_sig"])
if output_path.exists() and not force:
console.log("Cached ranking stability found")
return pd.read_feather(output_path)
console.log("Cached ranking stability not found, computing...")
console.log("Computing dev results")
dev_results = (
pseq(input_dir.glob("*.feather"))
.map(lambda x: rank_compute_bootstrap_ci(x, n_trials=n_trials, fold="dev"))
.list()
)
console.log("Computing test results")
test_results = (
pseq(input_dir.glob("*.feather"))
.map(lambda x: rank_compute_bootstrap_ci(x, n_trials=n_trials, fold="test"))
.list()
)
dev_processed = (
seq(dev_results)
.group_by(lambda x: x["trial_size"])
.smap(process_trial_group)
.flatten()
.list()
)
test_processed = (
seq(test_results)
.group_by(lambda x: x["trial_size"])
.smap(process_trial_group)
.flatten()
.list()
)
dev_df = pd.DataFrame(dev_processed).drop("diff_dist", axis=1)
dev_df["fold"] = "dev"
test_df = pd.DataFrame(test_processed).drop("diff_dist", axis=1)
test_df["fold"] = "test"
df = pd.concat([dev_df, test_df]).reset_index()
df["experiment"] = df["fold"].map(label_sig)
df.to_feather(output_path)
return df
@register_plot("rank_sig")
def rank_stability_sig(filetypes: List[str], commit: bool = False, n_trials: int = 1000):
df = get_cached_rank_stability_sig(force=False, n_trials=n_trials)
font_size = 14
chart = (
# The plot gets too crowded if we include below 100, where
# we have a higher density of experiments than 1 per 100 sizes
alt.Chart(df[df["trial_size"] > 99])
.mark_boxplot(size=5)
.encode(
x=alt.X("trial_size", title="Sample Size"),
y=alt.Y("total_p_value", title="P-Value", axis=alt.Axis(tickCount=11),),
)
.properties(width=400, height=150)
.facet(alt.Column("experiment", title=""))
.configure_header(titleFontSize=font_size, labelFontSize=font_size)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "ranking_stability_significance", filetypes)
else:
save_chart(chart, AUTO_FIG / "ranking_stability_significance", filetypes)
@register_plot("stability")
def rank_stability_plot(filetypes: List[str], commit: bool = False):
test_irt_parsed = IrtParsed.from_irt_file(
Path(conf["irt"]["squad"]["test"]["pyro"]["3PL"]["full"]) / "parameters.json"
)
test_preds = LeaderboardPredictions.parse_file(conf["squad"]["submission_predictions"]["test"])
mapping = read_json(conf["squad"]["dev_to_test"])
dev_to_test = mapping["dev_to_test"]
df = create_rank_stability_df(
dev_to_test=dev_to_test, test_preds=test_preds, test_irt_parsed=test_irt_parsed
)
names = {
"abs_irt_corr": "IRT to IRT",
"classical_corr": "Acc to Acc",
"test_classical_sample_classical_corr": "Acc to Acc",
"test_classical_sample_irt_corr": "IRT to Acc",
"test_irt_sample_classical_corr": "Acc to IRT",
"test_irt_sample_irt_corr": "IRT to IRT",
}
color_order = ["IRT to IRT", "Acc to Acc", "IRT to Acc", "Acc to IRT"]
melt_df = df.drop(columns=["irt_corr"]).melt(id_vars=["trial_size", "trial_id"]).dropna(axis=0)
excluded = ["IRT to Acc", "Acc to IRT"]
console.log(melt_df.head())
melt_df["correlation"] = melt_df["variable"].map(lambda v: names[v])
melt_df = melt_df[melt_df["correlation"].map(lambda v: v not in excluded)]
melt_df["experiment"] = melt_df["variable"].map(label_experiment)
base = alt.Chart(melt_df).encode(
x=alt.X(
"trial_size",
title="Development Set Sample Size",
scale=alt.Scale(type="log", base=2, domain=[16, 6000]),
),
color=alt.Color(
"correlation",
title="Correlation",
scale=alt.Scale(scheme="category10"),
sort=color_order,
legend=alt.Legend(
symbolOpacity=1,
symbolType="circle",
symbolStrokeWidth=3,
orient="none",
legendX=570,
legendY=105,
fillColor="white",
strokeColor="gray",
padding=5,
),
),
)
y_title = "Kendall Rank Correlation"
line = base.mark_line(opacity=0.7).encode(
y=alt.Y("mean(value):Q", scale=alt.Scale(zero=False), title=y_title),
)
band = base.mark_errorband(extent="ci").encode(
y=alt.Y("value", title=y_title, scale=alt.Scale(zero=False)),
color=alt.Color("correlation", sort=color_order),
)
font_size = 14
chart = (
(band + line)
.properties(width=300, height=170)
.facet(alt.Column("experiment", title=""))
.configure_header(titleFontSize=font_size, labelFontSize=font_size)
.resolve_axis(y="independent")
.configure(padding=0)
)
if commit:
save_chart(chart, COMMIT_AUTO_FIGS / "stability_simulation_corr", filetypes)
else:
save_chart(chart, AUTO_FIG / "stability_simulation_corr", filetypes)
def create_rank_stability_df(*, test_irt_parsed, dev_to_test, test_preds):
def get_test_irt(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_irt_parsed.model_stats:
return test_irt_parsed.model_stats[test_id].skill
else:
return None
else:
return None
def get_test_classical(dev_id):
if dev_id in dev_to_test:
test_id = dev_to_test[dev_id]
if test_id in test_preds.model_scores:
return test_preds.model_scores[test_id]["exact_match"]
else:
return None
else:
return None
rows = []
trials = {}
input_dir = Path(conf["stability"]["ranking"])
for path in input_dir.glob("*.feather"):
exp_df = pd.read_feather(path)
exp_df["abs_a_irt"] = exp_df["a_irt"].abs()
exp_df["abs_b_irt"] = exp_df["b_irt"].abs()
exp_df["test_classical"] = exp_df["subject_id"].map(get_test_classical)
exp_df["test_irt"] = exp_df["subject_id"].map(get_test_irt)
# Drop the rows missing test data
exp_df = exp_df.dropna(0)
size = exp_df.iloc[0]["size"]
trial_id = exp_df.iloc[0].trial_id
trials[(size, trial_id)] = exp_df
corr = exp_df.corr(method="kendall")
rows.append(
{
"trial_size": size,
"trial_id": trial_id,
"irt_corr": corr.loc["a_irt"].b_irt,
"classical_corr": corr.loc["a_classical"].b_classical,
"test_irt_sample_irt_corr": abs(corr.loc["test_irt"].a_irt),
"test_irt_sample_classical_corr": abs(corr.loc["test_irt"].a_classical),
"test_classical_sample_irt_corr": abs(corr.loc["test_classical"].a_irt),
"test_classical_sample_classical_corr": abs(corr.loc["test_classical"].a_classical),
}
)
rows.append(
{
"trial_size": size,
"trial_id": trial_id,
"irt_corr": None,
"classical_corr": None,
"test_irt_sample_irt_corr": abs(corr.loc["test_irt"].b_irt),
"test_irt_sample_classical_corr": abs(corr.loc["test_irt"].b_classical),
"test_classical_sample_irt_corr": abs(corr.loc["test_classical"].b_irt),
"test_classical_sample_classical_corr": abs(corr.loc["test_classical"].b_classical),
}
)
df =
|
pd.DataFrame(rows)
|
pandas.DataFrame
|
import tqdm
import torch
import torch.nn.functional as F
import pandas as pd
from collections import deque
from dfp import Transition
class Agent:
def __init__(self, env, collect_policy, replay_buffer, model, optimizer, scheduler=None):
self.env = env
self.collect_policy = collect_policy
self.replay_buffer = replay_buffer
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
self.train_metrics = deque(maxlen=10)
self.collect_metrics = deque(maxlen=10)
# global vars / counters
self.env_steps = 0
self.train_steps = 0
self.episodes = 0
self.episode_rewards = 0
self.episode_steps = 0
# state
self.obs = None
def fill_buffer(self):
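        # Seed the replay buffer to capacity with transitions gathered by a
        # uniformly random policy before any training happens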
env = self.env
action_space = env.action_space
self.obs = env.reset()
pbar = tqdm.tqdm(total=self.replay_buffer.capacity)
while not self.replay_buffer.is_full():
action = action_space.sample() # random policy
next_obs, reward, done, info = env.step(action)
transition = Transition(self.obs, action, reward, next_obs, done)
self.replay_buffer.push(transition)
self.obs = next_obs
if done:
self.obs = env.reset()
pbar.update(1)
pbar.close()
def env_step(self):
env = self.env
with torch.no_grad():
action = self.collect_policy(self.obs)
next_obs, reward, done, info = env.step(action)
transition = Transition(self.obs, action, reward, next_obs, done)
self.replay_buffer.push(transition)
self.obs = next_obs
# update counters
self.env_steps += 1
self.episode_steps += 1
self.episode_rewards += reward
if done:
info['ep_return'] = self.episode_rewards
info['ep_length'] = self.episode_steps
self.collect_metrics.append(info)
# update counters
self.episodes += 1
self.episode_rewards = 0
self.episode_steps = 0
# have to reset
self.obs = env.reset()
def train_step(self, batch_size):
self.model.train()
train_obs, actions, targets, target_masks = self.replay_buffer.sample(batch_size)
predictions = self.model(train_obs, actions)
# Replace invalid (exceeding episode length) time steps to cancel their gradients
# NOTE: We replace the invalid targets with copies of the predictions.
pred_clones = predictions.clone().detach()
mask_invalid = ~target_masks
targets[mask_invalid] = pred_clones[mask_invalid]
loss = F.mse_loss(predictions, targets, reduction='sum')
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.scheduler is not None:
self.scheduler.step()
loss_sum = loss.item()
full_loss = loss_sum / target_masks.numel()
valid_loss = loss_sum / target_masks.sum().item()
self.train_metrics.append(dict(pred_loss=full_loss, valid_pred_loss=valid_loss))
self.train_steps += 1
self.model.eval()
@property
def counters(self):
return dict(env_steps=self.env_steps, episodes=self.episodes, train_steps=self.train_steps)
def gather_metrics(self):
d1 =
|
pd.DataFrame.from_records(self.collect_metrics)
|
pandas.DataFrame.from_records
|
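The sketch below is a clarifying aside, not part of the original agent code: it isolates the masking trick that train_step uses, overwriting invalid target entries with detached copies of the predictions so those positions contribute zero loss and zero gradient. The tensors are toy stand-ins.
import torch
import torch.nn.functional as F

predictions = torch.randn(4, 3, requires_grad=True)
targets = torch.randn(4, 3)
target_masks = torch.tensor([[True, True, False],
                             [True, False, False],
                             [True, True, True],
                             [True, True, False]])

# Same idea as in train_step: invalid positions get copies of the predictions.
targets[~target_masks] = predictions.detach()[~target_masks]
loss = F.mse_loss(predictions, targets, reduction='sum')
loss.backward()
print(predictions.grad[~target_masks].abs().max())  # tensor(0.): masked positions receive no gradient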
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import pickle
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from transformers import BertTokenizer, BertConfig, BertForSequenceClassification
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
# map_24h_internal_rule={"0":"0","1":"5","2":"1","3":"2","4":"3","5":"4","6":"6","7":"7","8":"8"}
class ModelLoad():
def __init__(self):
# configuration
self.ROOT_FOLDER = '/import/cogsci/ravi/codes/comment-filter-24h-internal-rule/services/web/project/'
print('ROOT_FOLDER',self.ROOT_FOLDER)
# Load a trained model that you have fine-tuned
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.bert_model = 'EMBEDDIA/crosloengual-bert' #EMBEDDIA Model
DIRECTORIES = {
'ml_hate_speech_path': os.path.join(self.ROOT_FOLDER, 'models/ml_hate_speech_classifier', self.bert_model)
}
self.model_file = os.path.join(DIRECTORIES['ml_hate_speech_path'], 'pytorch_model.bin')
self.model_file_nb = os.path.join(DIRECTORIES['ml_hate_speech_path'], 'nb_model_bigram.sav')
print('model_file',self.model_file)
print('model_dir',os.listdir(os.path.join(self.ROOT_FOLDER, 'models')))
print('model_dir_s',os.listdir(DIRECTORIES['ml_hate_speech_path']))
print(os.path.isfile(self.model_file))
if not os.path.isfile(self.model_file):
print('Please Download the model ...')
exit(0)
config_file = DIRECTORIES['ml_hate_speech_path']+'/config.json'
token_file = DIRECTORIES['ml_hate_speech_path']+'/vocab.txt'
config = BertConfig.from_pretrained(config_file, num_labels=9)
self.tokenizer = BertTokenizer.from_pretrained(token_file, do_lower_case=False)
print('Loading model ...', self.model_file)
self.model = BertForSequenceClassification.from_pretrained(self.model_file, config=config)
self.model_nb = pickle.load(open(self.model_file_nb, 'rb'))
self.model.eval()
self.model.to(self.device)
def load_models(self):
#Return Model
return self.model,self.tokenizer
def get_model(self):
# Return Model
return self.model
def get_model_nb(self):
# Return Model
return self.model_nb
def get_tokenizer(self):
return self.tokenizer
def get_device(self):
return self.device
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
@classmethod
def _read_csv(cls, input_file, quotechar=None):
"""Reads a comma separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
reader = csv.reader(f, delimiter=",", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
@classmethod
def _read_data(cls, data):
"""Reads a tab separated value file."""
lines = []
for line in data:
lines.append(line)
# print("Lines: ", lines)
return lines
class SemEvalProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "offenseval-training-v1_train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "offenseval-training-v1_train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "offenseval-training-v1_eval.tsv")), "dev")
def get_test_examples(self, data):
return self._create_examples(
self._read_data(data), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
if set_type == 'dev':
for (i, line) in enumerate(lines):
#if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
text_b = None
label = "0" if line[1] == 'OFF' else "1"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
elif set_type == 'train':
for (i, line) in enumerate(lines):
#if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
text_b = None
label = "0" if line[1] == 'OFF' else "1"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
elif set_type == 'test':
for (i, line) in enumerate(lines):
#if i == 0:
# continue
guid = str(i)
text_a = line
text_b = None
label = "0"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
if label_list:
label_map = {label : i for i, label in enumerate(label_list)}
else:
label_map = {"0": i for i in range(len(examples))}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
# print(tokens)
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0 - x
def predict_nb(text,model_nb):
predicted = model_nb.predict(text)
    predicted_prob = model_nb.predict_proba(text)  # scikit-learn API is predict_proba
return predicted, predicted_prob
def predict_ml_hs(data, tokenizer, model, model_nb, device):
#local_rank = -1
max_seq_length = 128
eval_batch_size = 1
task_name = 'semeval'
processors = {
"semeval": SemEvalProcessor,
}
processor = processors[task_name]()
test_examples = processor.get_test_examples(data)
test_features = convert_examples_to_features(
test_examples, None, max_seq_length, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=eval_batch_size)
model.eval()
all_preds = []
all_certainities = []
all_details = []
    all_classifier = []
###### NB prediction ###########
nb_predicts = model_nb.predict(data)
nb_predicts_prob = model_nb.predict_proba(data)
counter = 0
softmax = torch.nn.Softmax(dim=1)
for input_ids, input_mask, segment_ids, label_ids in test_dataloader:
# pred = nb_predicts[counter]
# certainities = nb_predicts_prob[counter]
#Only if NB is not predicting Rule 1.
        # TODO: Need to remove NB in future, no real benefit
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
with torch.no_grad():
outputs = model(input_ids, token_type_ids=None, attention_mask=input_mask)
logits = outputs.logits
logits = softmax(logits)
logits = logits.detach().cpu().numpy()
preds = np.argmax(logits, axis=1).tolist()
certainities = np.amax(logits, axis=1).tolist()
details = {}
#TODO: check something looks wrong here
for idx in range(0,9):
logit = logits[:,idx][0]
if idx == 0:
details["PASS"]=logit
else:
rule = "RULE-"+str(idx)
details[rule]=logit
# else:
# rule = "RULE-1"
# details[rule]=logit
if len(certainities)>1:
print(certainities)
all_preds.extend(preds)
all_certainities.extend(certainities)
all_details.append(details)
counter = counter +1
# print(len(all_preds), len(all_certainities), len(all_details))
preds_class = []
rule = []
for i in range(len(all_preds)):
tmp_pred = all_preds[i]
if str(tmp_pred) == '0':
pred = 'PASS'
else:
pred = 'FAIL'
preds_class.append(pred)
rule.append(tmp_pred)
# print(len(preds_class), len(all_certainities), len(all_details))
return preds_class, all_certainities, all_details, rule
def get_dataloader(data, batch_size, processor, tokenizer, max_seq_length):
    # Take the processor, tokenizer and max_seq_length as arguments so this
    # helper does not depend on names that are never defined at module level.
    examples = processor.get_test_examples(data)
    features = convert_examples_to_features(
        examples, None, max_seq_length, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
test_sampler = SequentialSampler(test_data)
data_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
return data_dataloader
def train_ml_hs(train_data,val_data, tokenizer, model, model_nb, device):
#local_rank = -1
max_seq_length = 256
batch_size = 8
task_name = 'semeval'
processors = {
"semeval": SemEvalProcessor,
}
processor = processors[task_name]()
model.train()
    train_dataloader = get_dataloader(train_data, batch_size, processor, tokenizer, max_seq_length)
    val_dataloader = get_dataloader(val_data, batch_size, processor, tokenizer, max_seq_length)
model_load = None
def predict(data):
global model_load
if model_load is None:
model_load = ModelLoad()
return predict_ml_hs(data, model_load.get_tokenizer(), model_load.get_model(), model_load.get_model_nb(), model_load.get_device())
if __name__ == "__main__":
import pandas as pd
data = pd.read_csv('/import/cogsci/ravi/datasets/24sata/csv_embeddia_export.csv')
content = data.content.values.tolist()
text = content
# c = 1064468
# for i, t in enumerate(data[data['infringed_on_rule'] == 1].content.values):
# text.append(t)
# if i > 2:
# break
label, confidence,detail, rule = predict(text)
print(len(text), len(label), len(confidence), len(detail))
df =
|
pd.DataFrame({'content':text, 'block':label,'embeddia_rule':rule, 'result':confidence, 'details':detail})
|
pandas.DataFrame
|
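A hedged illustration, not taken from the source, of the [CLS]/[SEP], segment-id and padding convention that convert_examples_to_features documents above. It uses a tiny invented vocabulary instead of a real BertTokenizer.
max_seq_length = 8
vocab = {"[PAD]": 0, "[CLS]": 101, "[SEP]": 102, "the": 5, "dog": 6, "is": 7, "hairy": 8}

tokens_a = ["the", "dog", "is", "hairy"][: max_seq_length - 2]  # leave room for [CLS] and [SEP]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)                 # single sequence: all type ids are 0
input_ids = [vocab[t] for t in tokens]
input_mask = [1] * len(input_ids)               # 1 for real tokens, 0 for padding

padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding

assert len(input_ids) == len(input_mask) == len(segment_ids) == max_seq_length
print(input_ids)   # [101, 5, 6, 7, 8, 102, 0, 0]
print(input_mask)  # [1, 1, 1, 1, 1, 1, 0, 0]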
"""
Copyright (c) 2021, FireEye, Inc.
Copyright (c) 2021 <NAME>
"""
import os
# noinspection PyUnresolvedReferences,PyPackageRequirements
import ember
import joblib
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.preprocessing import KBinsDiscretizer
from mw_backdoor import ember_feature_utils, constants
# FEATURES
def load_features(feats_to_exclude, dataset='ember', selected=False, vrb=False):
""" Load the features and exclude those in list.
:param feats_to_exclude: (list) list of features to exclude
:param dataset: (str) name of the dataset being used
:param selected: (bool) if true load only Lasso selected features for Drebin
:param vrb: (bool) if true print debug strings
:return: (dict, array, dict, dict) feature dictionaries
"""
if dataset == 'ember':
feature_names = np.array(ember_feature_utils.build_feature_names())
non_hashed = ember_feature_utils.get_non_hashed_features()
hashed = ember_feature_utils.get_hashed_features()
elif dataset == 'pdf' or dataset == 'ogcontagio':
feature_names, non_hashed, hashed = load_pdf_features()
elif dataset == 'drebin':
feature_names, non_hashed, hashed, feasible = load_drebin_features(feats_to_exclude, selected)
else:
raise NotImplementedError('Dataset {} not supported'.format(dataset))
feature_ids = list(range(feature_names.shape[0]))
# The `features` dictionary will contain only numerical IDs
features = {
'all': feature_ids,
'non_hashed': non_hashed,
'hashed': hashed
}
name_feat = dict(zip(feature_names, feature_ids))
feat_name = dict(zip(feature_ids, feature_names))
if dataset != 'drebin':
feasible = features['non_hashed'].copy()
for u_f in feats_to_exclude:
feasible.remove(name_feat[u_f])
features['feasible'] = feasible
if vrb:
print(
'Total number of features: {}\n'
'Number of non hashed features: {}\n'
'Number of hashed features: {}\n'
'Number of feasible features: {}\n'.format(
len(features['all']),
len(features['non_hashed']),
len(features['hashed']),
len(features['feasible'])
)
)
print('\nList of non-hashed features:')
print(
['{}: {}'.format(f, feat_name[f]) for f in features['non_hashed']]
)
print('\nList of feasible features:')
print(
['{}: {}'.format(f, feat_name[f]) for f in features['feasible']]
)
return features, feature_names, name_feat, feat_name
def load_pdf_features():
""" Load the PDF dataset feature list
:return: (ndarray) array of feature names for the pdf dataset
"""
arbitrary_feat = [
'author_dot',
'keywords_dot',
'subject_dot',
'author_lc',
'keywords_lc',
'subject_lc',
'author_num',
'keywords_num',
'subject_num',
'author_oth',
'keywords_oth',
'subject_oth',
'author_uc',
'keywords_uc',
'subject_uc',
'createdate_ts',
'moddate_ts',
'title_dot',
'createdate_tz',
'moddate_tz',
'title_lc',
'creator_dot',
'producer_dot',
'title_num',
'creator_lc',
'producer_lc',
'title_oth',
'creator_num',
'producer_num',
'title_uc',
'creator_oth',
'producer_oth',
'version',
'creator_uc',
'producer_uc'
]
feature_names = np.load('saved_files/pdf_features.npy')
non_hashed = [np.searchsorted(feature_names, f) for f in sorted(arbitrary_feat)]
hashed = list(range(feature_names.shape[0]))
hashed = list(set(hashed) - set(non_hashed))
return feature_names, non_hashed, hashed
def build_feature_names(dataset='ember'):
""" Return the list of feature names for the specified dataset.
:param dataset: (str) dataset identifier
:return: (list) list of feature names
"""
features, feature_names, name_feat, feat_name = load_features(
feats_to_exclude=[],
dataset=dataset
)
return feature_names.tolist()
def load_drebin_features(infeas, selected=False):
""" Return the list of Drebin features.
Due to the huge number of features we will use the vectorizer file saved
during the preprocessing.
:return:
"""
prefixes = {
'activity': 'manifest',
'api_call': 'code',
'call': 'code',
'feature': 'manifest',
'intent': 'manifest',
'permission': 'manifest',
'provider': 'manifest',
'real_permission': 'code',
'service_receiver': 'manifest',
'url': 'code'
}
vec_file = os.path.join(constants.DREBIN_DATA_DIR, 'vectorizer.pkl')
s_feat_file = os.path.join(constants.DREBIN_DATA_DIR, 's_feat_sel.npy')
# Check if the vectorizer file is available, otherwise create it
if not os.path.isfile(vec_file):
load_drebin_dataset(selected=selected)
if selected and not os.path.isfile(s_feat_file):
load_drebin_dataset(selected=selected)
vectorizer = joblib.load(vec_file)
feature_names = np.array(sorted(list(vectorizer.vocabulary_.keys())))
if selected:
s_f = np.load(s_feat_file)
feature_names = feature_names[s_f]
n_f = feature_names.shape[0]
feasible = [i for i in range(n_f) if feature_names[i].split('::')[0] not in infeas]
hashed = [i for i in range(n_f) if prefixes[feature_names[i].split('::')[0]] == 'code']
non_hashed = [i for i in range(n_f) if prefixes[feature_names[i].split('::')[0]] == 'manifest']
return feature_names, non_hashed, hashed, feasible
# DATA SETS
def load_dataset(dataset='ember', selected=False):
if dataset == 'ember':
x_train, y_train, x_test, y_test = load_ember_dataset(True)
elif dataset == 'ogcontagio':
x_train, y_train, x_test, y_test = load_pdf_dataset()
elif dataset == 'drebin':
x_train, y_train, x_test, y_test = load_drebin_dataset(selected)
else:
raise NotImplementedError('Dataset {} not supported'.format(dataset))
return x_train, y_train, x_test, y_test
# noinspection PyBroadException
def load_ember_dataset(binarized=False):
""" Return train and test data from EMBER.
:return: (array, array, array, array)
"""
# Perform feature vectorization only if necessary.
try:
if not binarized:
x_train, y_train, x_test, y_test = ember.read_vectorized_features(
constants.EMBER_DATA_DIR,
feature_version=1
)
else:
x_train = np.load(os.path.join(constants.EMBER_DATA_DIR, "x_train.npy"))
x_test = np.load(os.path.join(constants.EMBER_DATA_DIR, "x_test.npy"))
y_train = np.load(os.path.join(constants.EMBER_DATA_DIR, "y_train.npy"))
y_test = np.load(os.path.join(constants.EMBER_DATA_DIR, "y_test.npy"))
print("load binarized=%s" % str(binarized))
except:
ember.create_vectorized_features(
constants.EMBER_DATA_DIR,
feature_version=1
)
x_train, y_train, x_test, y_test = ember.read_vectorized_features(
constants.EMBER_DATA_DIR,
feature_version=1
)
x_train = x_train.astype(dtype='float64')
x_test = x_test.astype(dtype='float64')
# Get rid of unknown labels
x_train = x_train[y_train != -1]
y_train = y_train[y_train != -1]
x_test = x_test[y_test != -1]
y_test = y_test[y_test != -1]
return x_train, y_train, x_test, y_test
def load_pdf_dataset():
mw_file = 'ogcontagio_mw.npy'
gw_file = 'ogcontagio_gw.npy'
# Load malicious
mw = np.load(
# os.path.join(constants.SAVE_FILES_DIR, mw_file),
os.path.join('data/', mw_file),
allow_pickle=True
).item()
mwdf = pd.DataFrame(mw)
mwdf = mwdf.transpose()
mwdf['class'] = [True] * mwdf.shape[0]
mwdf.index.name = 'filename'
mwdf = mwdf.reset_index()
train_mw, test_mw = train_test_split(mwdf, test_size=0.4, random_state=42)
# Load benign
gw = np.load(
# os.path.join(constants.SAVE_FILES_DIR, gw_file),
os.path.join('data/', gw_file),
allow_pickle=True
).item()
gwdf = pd.DataFrame(gw)
gwdf = gwdf.transpose()
gwdf['class'] = [False] * gwdf.shape[0]
gwdf.index.name = 'filename'
gwdf = gwdf.reset_index()
train_gw, test_gw = train_test_split(gwdf, test_size=0.4, random_state=42)
# Merge dataframes
train_df = pd.concat([train_mw, train_gw])
test_df =
|
pd.concat([test_mw, test_gw])
|
pandas.concat
|
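A small self-contained sketch, not from the source, of the load_pdf_dataset pattern above: a dict of per-file features is transposed into a DataFrame, a class label is attached, each class is split, and the partitions are merged with pandas.concat. The file names and feature columns are invented.
import pandas as pd
from sklearn.model_selection import train_test_split

def fake_frame(label, offset):
    # Build a tiny stand-in for the mw/gw dicts loaded from the .npy files.
    d = {f"file_{offset + i}": {"size": i * 10.0, "n_objs": float(i)} for i in range(10)}
    df = pd.DataFrame(d).transpose()
    df["class"] = [label] * df.shape[0]
    df.index.name = "filename"
    return df.reset_index()

mwdf, gwdf = fake_frame(True, 0), fake_frame(False, 100)
train_mw, test_mw = train_test_split(mwdf, test_size=0.4, random_state=42)
train_gw, test_gw = train_test_split(gwdf, test_size=0.4, random_state=42)
train_df = pd.concat([train_mw, train_gw])
test_df = pd.concat([test_mw, test_gw])
print(train_df.shape, test_df.shape)  # (12, 4) (8, 4)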
import pandas as pd
import numpy as np
# MOVING AVERAGE
def moving_avg(array,window=7):
    # Pad the warm-up region with window - 1 copies of the first value so the
    # output has the same length as the input.
    array_m = [array[0]] * (window - 1)
ll = len(array)
for i in range(ll-(window-1)):
array_m.append(np.mean(array[i:i+window]))
return np.array(array_m)
# EPIDEMIC DATA (INTERNATIONAL)
def get_epidemic_data(country):
    # source: Johns Hopkins University
file_confirmed='https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
file_deaths='https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
file_recovered='https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
df_confirmed=pd.read_csv(file_confirmed)
df_deaths=pd.read_csv(file_deaths)
df_recovered=
|
pd.read_csv(file_recovered)
|
pandas.read_csv
|
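As a cross-check, not part of the original script, the hand-rolled moving_avg above can be reproduced with pandas' rolling mean; fillna mimics the warm-up padding with the first value.
import numpy as np
import pandas as pd

values = np.arange(10, dtype=float)
window = 7
smoothed = (
    pd.Series(values)
    .rolling(window)
    .mean()
    .fillna(values[0])   # same effect as padding with copies of the first value
    .to_numpy()
)
print(smoothed)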
from html.parser import HTMLParser
from bs4 import BeautifulSoup
import glob, os
import pandas as pd
import string
import nltk
import re
from nltk.corpus import stopwords
from nltk.stem.snowball import RussianStemmer
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
def parse(dir):
files = []
for file in glob.glob(f'{dir}/messages*'):
files.append(file)
fid = 0
messages = []
for file in files:
fid += 1
print('parsing', fid, len(files))
doc = BeautifulSoup(open(file), 'html.parser')
doc_messages = doc.find_all('div', ['message default clearfix', 'message default clearfix joined'])
messages.extend(doc_messages)
data = {}
id = 0
for raw_message in messages:
id += 1
if id % 100 == 0:
print('processing', id, len(messages))
author = raw_message.find('div', class_='initials')
author_name = raw_message.find('div', class_='from_name')
if author is not None:
last_author = author
last_author_name = author_name
message = raw_message.find('div', class_='text')
date = raw_message.find('div', class_='pull_right date details')
if message is not None:
author_data = last_author.text.strip()
author_name_data = last_author_name.text.strip()
timestamp_data = pd.to_datetime(date['title'], dayfirst=True)
text_data = message.text.strip()
data[id] = (author_data, author_name_data, timestamp_data, text_data)
df = pd.DataFrame.from_dict(data, orient='index', columns=['author_initials', 'author_name', 'timestamp', 'text'])
df.to_csv('crab_data.csv', encoding='utf-8')
def plot_general_activity():
df = pd.read_csv('crab_data.csv').drop('Unnamed: 0', axis=1)
df = df.set_index('timestamp')
df.index =
|
pd.to_datetime(df.index)
|
pandas.to_datetime
|
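A hedged sketch, not from the source, of what plot_general_activity can do once the index is converted with pd.to_datetime: resample the messages to daily counts. The messages and timestamps below are fabricated.
import pandas as pd

df = pd.DataFrame(
    {"text": ["hi", "hello", "ok"]},
    index=["01.01.2021 10:00:00", "01.01.2021 12:30:00", "02.01.2021 09:15:00"],
)
df.index = pd.to_datetime(df.index, dayfirst=True)  # same conversion as above
daily_counts = df["text"].resample("D").count()
print(daily_counts)  # 2 messages on Jan 1, 1 on Jan 2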
from dateutil import parser
import numpy as np
import pandas as pd
import urllib3
import json
import datetime as dt
import time
import warnings
import math
#######################################################################
# drops invalid data from our history
def dropDirty(history, exWeekends):
history = history[(history.Open != 0)
& (history.High != 0)
& (history.Low != 0)
& (history.Close != 0)]
history = history[(pd.isnull(history.Open) == False)
& (pd.isnull(history.High) == False)
& (pd.isnull(history.Low) == False)
& (
|
pd.isnull(history.Close)
|
pandas.isnull
|
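A compact equivalent, offered only as an illustration, of the dropDirty filtering above: drop rows where any OHLC column is zero or NaN. The sample frame is invented.
import numpy as np
import pandas as pd

history = pd.DataFrame({
    "Open":  [1.0, 0.0, 2.0, np.nan],
    "High":  [1.5, 1.0, 2.5, 3.0],
    "Low":   [0.9, 0.5, 1.8, 2.0],
    "Close": [1.2, 0.8, np.nan, 2.5],
})
cols = ["Open", "High", "Low", "Close"]
clean = history.dropna(subset=cols)            # remove rows with NaN in any OHLC column
clean = clean[(clean[cols] != 0).all(axis=1)]  # remove rows with a zero in any OHLC column
print(clean)  # only the first row survives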
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from cbalancer import app
from flask import render_template, request
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
import psycopg2
import pandas as pd
from model import *
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import file_html
from bokeh.models import FuncTickFormatter, HoverTool
import folium
import pytz
import pdb
# use US/Eastern timezone
est = pytz.timezone('US/Eastern')
username = 'psam071'
host = 'localhost'
dbname = 'citibike'
db = create_engine('postgres://%s@%s/%s' % (username, host, dbname))  # '@' was missing between user and host
con = None
con = psycopg2.connect(database = dbname, user = username, host = host)
reg = load_regressor()
# SQL query to get all bike info for 2016
def fetch_query(number):
query = """
SELECT a.id, a.date, a.hour, bikes_out, bikes_in, dayofweek, month, is_weekday, is_holiday, tot_docks, avail_bikes, avail_docks, precip, temp
FROM features_subset a
LEFT JOIN weather b ON a.date = b.date AND a.hour = b.hour
LEFT JOIN stations c ON a.id = c.id
WHERE a.id = {}
AND tot_docks > 0
--AND a.date > '2016-03-01'
--WHERE tot_docks > 0
ORDER BY a.id, a.date, a.hour;
""".format(number)
df=
|
pd.read_sql_query(query,con)
|
pandas.read_sql_query
|
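A hedged sketch, not from the source, showing pandas.read_sql_query with a bound parameter instead of the str.format interpolation used in fetch_query above. It runs against an in-memory SQLite table with invented columns; the real code targets PostgreSQL.
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE features_subset (id INTEGER, avail_bikes INTEGER)")
con.executemany("INSERT INTO features_subset VALUES (?, ?)", [(1, 5), (2, 7)])

query = "SELECT id, avail_bikes FROM features_subset WHERE id = ?"
df = pd.read_sql_query(query, con, params=(2,))
print(df)
Bound parameters keep the station id out of the SQL string, which avoids the injection risk of building the query with format.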