from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
import lightgbm as lgb
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
import matplotlib as mpl
from sklearn import preprocessing as pp
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import log_loss
from sklearn.metrics import precision_recall_curve, average_precision_score
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.metrics import confusion_matrix, classification_report
get_ipython().run_line_magic('matplotlib', 'inline')
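# Load the labeled features CSV, split it into X (all columns except 'Class')
# and y ('Class'), and standardize every feature except 'Time'.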
current_path = os.getcwd()
file = '/CHANGE ME TO CORRECT PATH/features.csv'
data = pd.read_csv(current_path + file)
dataX = data.copy().drop(['Class'],axis=1)
dataY = data['Class'].copy()
featuresToScale = dataX.drop(['Time'],axis=1).columns
sX = pp.StandardScaler(copy=True)
dataX.loc[:,featuresToScale] = sX.fit_transform(dataX[featuresToScale])
scalingFactors = pd.DataFrame(data=[sX.mean_,sX.scale_],index=['Mean','StDev'],columns=featuresToScale)
X_train, X_test, y_train, y_test = train_test_split(dataX,
dataY, test_size=0.10,
random_state=2019, stratify=dataY)
# Training set: 10-fold cross-validation
k_fold = StratifiedKFold(n_splits=10,shuffle=True,random_state=2018)
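# Logistic regression: set hyperparameters, fit on each training fold, and
# collect out-of-fold probabilities plus training/CV log-loss.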
penalty = 'l2'
C = 1.0
class_weight = 'balanced'
random_state = 2018
solver = 'liblinear'
n_jobs = 1
logReg = LogisticRegression(penalty=penalty, C=C,
class_weight=class_weight, random_state=random_state,
solver=solver, n_jobs=n_jobs)
trainingScores = []
cvScores = []
predictionsBasedOnKFolds = pd.DataFrame(data=[],
index=y_train.index,columns=[0,1])
model = logReg
for train_index, cv_index in k_fold.split(np.zeros(len(X_train))
,y_train.ravel()):
X_train_fold, X_cv_fold = X_train.iloc[train_index,:], X_train.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
model.fit(X_train_fold, y_train_fold)
loglossTraining = log_loss(y_train_fold,
model.predict_proba(X_train_fold)[:,1])
trainingScores.append(loglossTraining)
predictionsBasedOnKFolds.loc[X_cv_fold.index,:] = model.predict_proba(X_cv_fold)
loglossCV = log_loss(y_cv_fold,
predictionsBasedOnKFolds.loc[X_cv_fold.index,1])
cvScores.append(loglossCV)
preds = pd.concat([y_train,predictionsBasedOnKFolds.loc[:,1]], axis=1)
preds.columns = ['trueLabel','prediction']
predictionsBasedOnKFoldsLogisticRegression = preds.copy()
precision, recall, thresholds = precision_recall_curve(preds['trueLabel'],
preds['prediction'])
average_precision = average_precision_score(preds['trueLabel'],
preds['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(preds['trueLabel'],preds['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
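# Random forest: same k-fold loop and out-of-fold evaluation as above,
# followed by the precision-recall and ROC curves.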
n_estimators = 10
max_features = 'auto'
max_depth = None
min_samples_split = 2
min_samples_leaf = 1
min_weight_fraction_leaf = 0.0
max_leaf_nodes = None
bootstrap = True
oob_score = False
n_jobs = -1
random_state = 2018
class_weight = 'balanced'
RFC = RandomForestClassifier(n_estimators=n_estimators,
max_features=max_features, max_depth=max_depth,
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_leaf_nodes=max_leaf_nodes, bootstrap=bootstrap,
oob_score=oob_score, n_jobs=n_jobs, random_state=random_state,
class_weight=class_weight)
trainingScores = []
cvScores = []
predictionsBasedOnKFolds = pd.DataFrame(data=[],
index=y_train.index,columns=[0,1])
model = RFC
for train_index, cv_index in k_fold.split(np.zeros(len(X_train)),
y_train.ravel()):
X_train_fold, X_cv_fold = X_train.iloc[train_index,:], X_train.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
model.fit(X_train_fold, y_train_fold)
loglossTraining = log_loss(y_train_fold, model.predict_proba(X_train_fold)[:,1])
trainingScores.append(loglossTraining)
predictionsBasedOnKFolds.loc[X_cv_fold.index,:] = model.predict_proba(X_cv_fold)
loglossCV = log_loss(y_cv_fold, predictionsBasedOnKFolds.loc[X_cv_fold.index,1])
cvScores.append(loglossCV)
preds = pd.concat([y_train,predictionsBasedOnKFolds.loc[:,1]], axis=1)
preds.columns = ['trueLabel','prediction']
predictionsBasedOnKFoldsRandomForests = preds.copy()
precision, recall, thresholds = precision_recall_curve(preds['trueLabel'],
preds['prediction'])
average_precision = average_precision_score(preds['trueLabel'],
preds['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(preds['trueLabel'],preds['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(
areaUnderROC))
plt.legend(loc="lower right")
plt.show()
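# XGBoost: for each fold, run xgb.cv to pick the best number of boosting
# rounds, retrain with xgb.train, and store the out-of-fold predictions.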
params_xGB = {
'nthread':16,
'learning_rate': 0.3,
'gamma': 0,
'max_depth': 6,
'min_child_weight': 1,
'max_delta_step': 0,
'subsample': 1.0,
'colsample_bytree': 1.0,
'objective':'binary:logistic',
'num_class':1,
'eval_metric':'logloss',
'seed':2018,
'silent':1
}
trainingScores = []
cvScores = []
predictionsBasedOnKFolds = pd.DataFrame(data=[],
index=y_train.index,columns=['prediction'])
for train_index, cv_index in k_fold.split(np.zeros(len(X_train)),
y_train.ravel()):
X_train_fold, X_cv_fold = X_train.iloc[train_index,:], X_train.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
dtrain = xgb.DMatrix(data=X_train_fold, label=y_train_fold)
dCV = xgb.DMatrix(data=X_cv_fold)
bst = xgb.cv(params_xGB, dtrain, num_boost_round=2000,
nfold=5, early_stopping_rounds=200, verbose_eval=50)
best_rounds = np.argmin(bst['test-logloss-mean'])
bst = xgb.train(params_xGB, dtrain, best_rounds)
loglossTraining = log_loss(y_train_fold, bst.predict(dtrain))
trainingScores.append(loglossTraining)
predictionsBasedOnKFolds.loc[X_cv_fold.index,'prediction'] = bst.predict(dCV)
loglossCV = log_loss(y_cv_fold, predictionsBasedOnKFolds.loc[X_cv_fold.index,'prediction'])
cvScores.append(loglossCV)
preds = pd.concat([y_train,predictionsBasedOnKFolds.loc[:,'prediction']], axis=1)
preds.columns = ['trueLabel','prediction']
predictionsBasedOnKFoldsXGBoostGradientBoosting = preds.copy()
precision, recall, thresholds = precision_recall_curve(preds['trueLabel'],preds['prediction'])
average_precision = average_precision_score(preds['trueLabel'],preds['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(preds['trueLabel'],preds['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
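# LightGBM: for each fold, train with early stopping against the validation
# fold and predict at the best iteration.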
params_lightGB = {
'task': 'train',
'application':'binary',
'num_class':1,
'boosting': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'metric_freq':50,
'is_training_metric':False,
'max_depth':4,
'num_leaves': 31,
'learning_rate': 0.01,
'feature_fraction': 1.0,
'bagging_fraction': 1.0,
'bagging_freq': 0,
'bagging_seed': 2018,
'verbose': 0,
'num_threads':16
}
trainingScores = []
cvScores = []
predictionsBasedOnKFolds = pd.DataFrame(data=[],
index=y_train.index,columns=['prediction'])
for train_index, cv_index in k_fold.split(np.zeros(len(X_train)),
y_train.ravel()):
X_train_fold, X_cv_fold = X_train.iloc[train_index,:], X_train.iloc[cv_index,:]
y_train_fold, y_cv_fold = y_train.iloc[train_index], y_train.iloc[cv_index]
lgb_train = lgb.Dataset(X_train_fold, y_train_fold)
lgb_eval = lgb.Dataset(X_cv_fold, y_cv_fold, reference=lgb_train)
gbm = lgb.train(params_lightGB, lgb_train, num_boost_round=2000,
valid_sets=lgb_eval, early_stopping_rounds=200)
loglossTraining = log_loss(y_train_fold, gbm.predict(X_train_fold, num_iteration=gbm.best_iteration))
trainingScores.append(loglossTraining)
predictionsBasedOnKFolds.loc[X_cv_fold.index,'prediction'] = gbm.predict(X_cv_fold, num_iteration=gbm.best_iteration)
loglossCV = log_loss(y_cv_fold, predictionsBasedOnKFolds.loc[X_cv_fold.index,'prediction'])
cvScores.append(loglossCV)
preds = pd.concat([y_train,predictionsBasedOnKFolds.loc[:,'prediction']], axis=1)
preds.columns = ['trueLabel','prediction']
predictionsBasedOnKFoldsLightGBMGradientBoosting = preds.copy()
precision, recall, thresholds = precision_recall_curve(preds['trueLabel'],preds['prediction'])
average_precision = average_precision_score(preds['trueLabel'],preds['prediction'])
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(preds['trueLabel'],preds['prediction'])
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
# Test set: score each trained model on the held-out test data
predictionsTestSetLogisticRegression = pd.DataFrame(data=[],index=y_test.index,columns=['prediction'])
predictionsTestSetLogisticRegression.loc[:,'prediction'] = logReg.predict_proba(X_test)[:,1]
logLossTestSetLogisticRegression = log_loss(y_test, predictionsTestSetLogisticRegression)
predictionsTestSetRandomForests = pd.DataFrame(data=[],index=y_test.index,columns=['prediction'])
predictionsTestSetRandomForests.loc[:,'prediction'] = RFC.predict_proba(X_test)[:,1]
logLossTestSetRandomForests = log_loss(y_test, predictionsTestSetRandomForests)
predictionsTestSetXGBoostGradientBoosting = pd.DataFrame(data=[],index=y_test.index,columns=['prediction'])
dtest = xgb.DMatrix(data=X_test)
predictionsTestSetXGBoostGradientBoosting.loc[:,'prediction'] = bst.predict(dtest)
logLossTestSetXGBoostGradientBoosting = log_loss(y_test, predictionsTestSetXGBoostGradientBoosting)
predictionsTestSetLightGBMGradientBoosting = pd.DataFrame(data=[],index=y_test.index,columns=['prediction'])
predictionsTestSetLightGBMGradientBoosting.loc[:,'prediction'] = gbm.predict(X_test, num_iteration=gbm.best_iteration)
logLossTestSetLightGBMGradientBoosting = log_loss(y_test, predictionsTestSetLightGBMGradientBoosting)
precision, recall, thresholds = precision_recall_curve(y_test,predictionsTestSetLogisticRegression)
average_precision = average_precision_score(y_test,predictionsTestSetLogisticRegression)
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(y_test,predictionsTestSetLogisticRegression)
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
precision, recall, thresholds = precision_recall_curve(y_test,predictionsTestSetRandomForests)
average_precision = average_precision_score(y_test,predictionsTestSetRandomForests)
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(y_test,predictionsTestSetRandomForests)
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
precision, recall, thresholds = precision_recall_curve(y_test,predictionsTestSetXGBoostGradientBoosting)
average_precision = average_precision_score(y_test,predictionsTestSetXGBoostGradientBoosting)
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(y_test,predictionsTestSetXGBoostGradientBoosting)
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
precision, recall, thresholds = precision_recall_curve(y_test,predictionsTestSetLightGBMGradientBoosting)
average_precision = average_precision_score(y_test,predictionsTestSetLightGBMGradientBoosting)
plt.step(recall, precision, color='k', alpha=0.7, where='post')
plt.fill_between(recall, precision, step='post', alpha=0.3, color='k')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('PRC: Average Precision = {0:0.2f}'.format(
average_precision))
fpr, tpr, thresholds = roc_curve(y_test,predictionsTestSetLightGBMGradientBoosting)
areaUnderROC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC: AUC = {0:0.2f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
# Ensemble of the previous algorithms
predictionsBasedOnKFoldsFourModels = pd.DataFrame(data=[],index=y_train.index)
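# The remainder of this ensemble cell is truncated in the dump. A minimal
# sketch of one way to continue (an assumption, not the original code): join
# the four models' out-of-fold probabilities into one frame and average them.
predictionsBasedOnKFoldsFourModels = predictionsBasedOnKFoldsFourModels.join(
    predictionsBasedOnKFoldsLogisticRegression['prediction'].astype(float).rename('predsLR')).join(
    predictionsBasedOnKFoldsRandomForests['prediction'].astype(float).rename('predsRF')).join(
    predictionsBasedOnKFoldsXGBoostGradientBoosting['prediction'].astype(float).rename('predsXGB')).join(
    predictionsBasedOnKFoldsLightGBMGradientBoosting['prediction'].astype(float).rename('predsLightGBM'))
ensemblePrediction = predictionsBasedOnKFoldsFourModels.mean(axis=1)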
import pandas as pd
import argparse
from difflib import get_close_matches
import PyFloraBook.in_out.data_coordinator as dc
# ---------------- GLOBALS ----------------
# These are the weights used to create the final score (a weighted avg)
WEIGHTS = {
"CalFlora": 1,
"OregonFlora": 1,
"CPNWH_OR": 1,
"CPNWH_WA": 1,
}
WEBSITES = WEIGHTS.keys()
INPUT_SUFFIX = "species"
OUTPUT_SUFFIX = "scores"
# ---------------- INPUT ----------------
# Parse arguments
parser = argparse.ArgumentParser(
description='Gather species counts for given families and analyze'
)
parser.add_argument(
"-f", "--families", nargs='+',
help="Names of the families to be analyzed."
)
args = parser.parse_args()
families = args.families
# Normalize the weights
weights_df = pd.DataFrame.from_dict(WEIGHTS, orient="index")
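# The script is truncated here in the dump. A plausible next step, stated as
# an assumption rather than the original code: scale the weights to sum to 1.
weights_df = weights_df / weights_df.sum()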
from __future__ import absolute_import
import pandas as pd
from io import StringIO
import zlib
from .. import encode, decode
from ..handlers import BaseHandler, register, unregister
from ..util import b64decode, b64encode
from .numpy import register_handlers as register_numpy_handlers
from .numpy import unregister_handlers as unregister_numpy_handlers
__all__ = ['register_handlers', 'unregister_handlers']
class PandasProcessor(object):
def __init__(self, size_threshold=500, compression=zlib):
"""
:param size_threshold: nonnegative int or None
valid values for 'size_threshold' are all nonnegative
integers and None. If size_threshold is None,
dataframes are always stored as csv strings
:param compression: a compression module or None
valid values for 'compression' are {zlib, bz2, None}
if compression is None, no compression is applied
"""
self.size_threshold = size_threshold
self.compression = compression
def flatten_pandas(self, buf, data, meta=None):
if self.size_threshold is not None and len(buf) > self.size_threshold:
if self.compression:
buf = self.compression.compress(buf.encode())
data['comp'] = True
data['values'] = b64encode(buf)
data['txt'] = False
else:
data['values'] = buf
data['txt'] = True
data['meta'] = meta
return data
def restore_pandas(self, data):
if data.get('txt', True):
# It's just text...
buf = data['values']
else:
buf = b64decode(data['values'])
if data.get('comp', False):
buf = self.compression.decompress(buf).decode()
meta = data.get('meta', {})
return (buf, meta)
def make_read_csv_params(meta):
meta_dtypes = meta.get('dtypes', {})
# The header is used to select the rows of the csv from which
# the column names are retrieved
header = meta.get('header', [0])
parse_dates = []
converters = {}
dtype = {}
for k, v in meta_dtypes.items():
if v.startswith('datetime'):
parse_dates.append(k)
elif v.startswith('complex'):
converters[k] = complex
else:
dtype[k] = v
return dict(dtype=dtype, header=header,
parse_dates=parse_dates, converters=converters)
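# Handler that round-trips a DataFrame: flatten() writes it out as CSV text
# (optionally zlib-compressed and base64-encoded) together with dtype, index,
# and column-level metadata; restore() rebuilds the frame from that payload.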
class PandasDfHandler(BaseHandler):
pp = PandasProcessor()
def flatten(self, obj, data):
dtype = obj.dtypes.to_dict()
meta = {'dtypes': {k: str(dtype[k]) for k in dtype},
'index': encode(obj.index),
'column_level_names': obj.columns.names,
'header': list(range(len(obj.columns.names)))}
data = self.pp.flatten_pandas(
obj.reset_index(drop=True).to_csv(index=False), data, meta
)
return data
def restore(self, data):
csv, meta = self.pp.restore_pandas(data)
params = make_read_csv_params(meta)
column_levels_names = meta.get('column_level_names', None)
df = (
pd.read_csv(StringIO(csv), **params)
if data['values'].strip()
else pd.DataFrame()
)
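# The dump cuts restore() short here. A plausible completion, stated as an
# assumption based on the metadata written by flatten(): reattach the encoded
# index and the column level names, then return the frame.
df = df.set_index(decode(meta['index']))
if column_levels_names:
    df.columns.names = column_levels_names
return df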
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import DataFrame, DatetimeIndex, Series
import pandas._testing as tm
from pandas.core.window import Expanding
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.expanding(2).sum()
@pytest.mark.filterwarnings(
"ignore:The `center` argument on `expanding` will be removed in the future"
)
def test_constructor(frame_or_series):
# GH 12669
c = frame_or_series(range(5)).expanding
# valid
c(min_periods=1)
c(min_periods=1, center=True)
c(min_periods=1, center=False)
@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
@pytest.mark.filterwarnings(
"ignore:The `center` argument on `expanding` will be removed in the future"
)
def test_constructor_invalid(frame_or_series, w):
# not valid
c = frame_or_series(range(5)).expanding
msg = "min_periods must be an integer"
with pytest.raises(ValueError, match=msg):
c(min_periods=w)
msg = "center must be a boolean"
with pytest.raises(ValueError, match=msg):
c(min_periods=1, center=w)
@pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"])
def test_numpy_compat(method):
# see gh-12811
e = Expanding(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
@pytest.mark.parametrize(
"expander",
[
1,
pytest.param(
"ls",
marks=pytest.mark.xfail(
reason="GH#16425 expanding with offset not supported"
),
),
],
)
def test_empty_df_expanding(expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
# to empty DataFrames with datetime index
expected = DataFrame(index=DatetimeIndex([]))
result = DataFrame(index=DatetimeIndex([])).expanding(expander).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero():
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = Series([np.nan])
result = x.expanding(min_periods=0).sum()
expected = Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.expanding(min_periods=1).sum()
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
def test_expanding_axis(axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame(
{i: [np.nan] * 2 + [float(j) for j in range(3, 11)] for i in range(20)}
)
else:
# axis == 1
expected = DataFrame([[np.nan] * 2 + [float(i) for i in range(3, 21)]] * 10)
result = df.expanding(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
def test_expanding_count_with_min_periods(frame_or_series):
# GH 26996
result = frame_or_series(range(5)).expanding(min_periods=3).count()
expected = frame_or_series([np.nan, np.nan, 3.0, 4.0, 5.0])
tm.assert_equal(result, expected)
def test_expanding_count_default_min_periods_with_null_values(frame_or_series):
# GH 26996
values = [1, 2, 3, np.nan, 4, 5, 6]
expected_counts = [1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 6.0]
result = frame_or_series(values).expanding().count()
expected = frame_or_series(expected_counts)
tm.assert_equal(result, expected)
def test_expanding_count_with_min_periods_exceeding_series_length(frame_or_series):
# GH 25857
result = frame_or_series(range(5)).expanding(min_periods=6).count()
expected = frame_or_series([np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"df,expected,min_periods",
[
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
3,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
2,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
1,
),
(DataFrame({"A": [1], "B": [4]}), [], 2),
(DataFrame(), [({}, [])], 1),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
3,
),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
2,
),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
1,
),
],
)
def test_iter_expanding_dataframe(df, expected, min_periods):
# GH 11704
expected = [DataFrame(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(expected, df.expanding(min_periods)):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"ser,expected,min_periods",
[
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 3),
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 2),
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 1),
(Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2),
(Series([np.nan, 2]), [([np.nan], [0]), ([np.nan, 2], [0, 1])], 2),
(Series([], dtype="int64"), [], 2),
],
)
def test_iter_expanding_series(ser, expected, min_periods):
# GH 11704
expected = [Series(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(expected, ser.expanding(min_periods)):
tm.assert_series_equal(actual, expected)
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"&lt;class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not l.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out of max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Series(1, index=index)
result = s.to_string()
assert "2013-01-02" in result
# nat in index
s2 = Series(2, index=[Timestamp("20130111"), NaT])
s = s2.append(s)
result = s.to_string()
assert "NaT" in result
# nat in summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import (
datetime,
timedelta,
)
Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
s = Series(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:59:59.999850" in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.to_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.to_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Series(pd.timedelta_range("1 days", periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
# GH 12615
index = pd.period_range("2013-01", periods=6, freq="M")
s = Series(np.arange(6, dtype="int64"), index=index)
exp = (
"2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64"
)
assert str(s) == exp
s = Series(index)
exp = (
"0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]"
)
assert str(s) == exp
# periods with mixed freq
s = Series(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
]
)
exp = (
"0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object"
)
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
"0 1.0000\n1 1.0000\n2 1.0000\n3 "
"1.0000\n4 1.0000\n ... \n125 "
"1.0000\n126 1.0000\n127 0.9999\n128 "
"1.0000\n129 1.0000\ndtype: float64"
)
assert res == exp
def chck_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split("\n")
lines = [
line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4, "display.show_dimensions", False):
res = repr(test_sers["onel"])
exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
assert exp == res
res = repr(test_sers["twol"])
exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
assert exp == res
res = repr(test_sers["asc"])
exp = (
"0 a\n1 ab\n ... \n4 abcde\n5 "
"abcdef\ndtype: object"
)
assert exp == res
res = repr(test_sers["desc"])
exp = (
"5 abcdef\n4 abcde\n ... \n1 ab\n0 "
"a\ndtype: object"
)
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
s = Series(range(20))
# default setting no truncation even if above min_rows
assert ".." not in repr(s)
s = Series(range(61))
# default of max_rows 60 triggers truncation if above
assert ".." in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(s)
assert "2 " not in repr(s)
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 12):
            # when min_rows is set higher than max_rows, use the smaller of the two
assert "5 5" not in repr(s)
        with option_context("display.max_rows", None, "display.min_rows", 12):
            # max_rows of None -> never truncate
            assert ".." not in repr(s)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/2 23:26
Desc: Eastmoney - quotes home page - Shanghai/Shenzhen/Beijing A shares
"""
import requests
import pandas as pd
def stock_zh_a_spot_em() -> pd.DataFrame:
"""
    Eastmoney - Shanghai/Shenzhen/Beijing A shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://82.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:6,m:0 t:80,m:1 t:2,m:1 t:23,m:0 t:81 s:2048",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = temp_df.index + 1
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
return temp_df
def stock_zh_b_spot_em() -> pd.DataFrame:
"""
    Eastmoney - B shares - real-time quotes
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
    :return: real-time quotes
:rtype: pandas.DataFrame
"""
url = "http://28.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "5000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:0 t:7,m:1 t:3",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152",
"_": "1623833739532",
}
r = requests.get(url, params=params)
data_json = r.json()
if not data_json["data"]["diff"]:
return pd.DataFrame()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.columns = [
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"量比",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.rename(columns={"index": "序号"}, inplace=True)
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"量比",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
    temp_df["最低"] = pd.to_numeric(temp_df["最低"], errors="coerce")
    temp_df["今开"] = pd.to_numeric(temp_df["今开"], errors="coerce")
    temp_df["昨收"] = pd.to_numeric(temp_df["昨收"], errors="coerce")
    temp_df["量比"] = pd.to_numeric(temp_df["量比"], errors="coerce")
    temp_df["换手率"] = pd.to_numeric(temp_df["换手率"], errors="coerce")
    temp_df["市盈率-动态"] = pd.to_numeric(temp_df["市盈率-动态"], errors="coerce")
    temp_df["市净率"] = pd.to_numeric(temp_df["市净率"], errors="coerce")
    return temp_df
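# A minimal usage sketch (illustrative only): both calls need live network access to the
# Eastmoney endpoints above, and the returned column labels are the Chinese names assigned
# inside the functions.
if __name__ == "__main__":
    stock_zh_a_spot_em_df = stock_zh_a_spot_em()
    print(stock_zh_a_spot_em_df.head())
    stock_zh_b_spot_em_df = stock_zh_b_spot_em()
    print(stock_zh_b_spot_em_df.head())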
import os
import csv
import re
import math
from collections import defaultdict
from scipy.signal import butter, lfilter
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from statistics import mean
from scipy.stats import kurtosis, skew
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from numpy import mean
from numpy import std
class MobiActDatafileParser:
def __init__(self, filename, filepath, label):
self.filename = filename
self.filepath = filepath
self.label = label
def read_data(self):
try:
csv_data = pd.read_csv(self.filepath+self.filename)
csv_data.drop('timestamp', inplace=True, axis=1)
csv_data.drop('rel_time', inplace=True, axis=1)
csv_data.drop('azimuth', inplace=True, axis=1)
csv_data.drop('pitch', inplace=True, axis=1)
csv_data.drop('roll', inplace=True, axis=1)
csv_data.drop('label', inplace=True, axis=1)
acc_x_data, acc_y_data, acc_z_data = csv_data['acc_x'].to_numpy(), csv_data['acc_y'].to_numpy(), csv_data['acc_z'].to_numpy()
gyr_x_data, gyr_y_data, gyr_z_data = csv_data['gyro_x'].to_numpy(), csv_data['gyro_y'].to_numpy(), csv_data['gyro_z'].to_numpy()
acc_data, gyr_data, acc2_data = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
acc_data['fx'] = pd.Series(self.butterworth_low_pass(acc_x_data, 5.0, 200.0, 4)).dropna()
acc_data['fy'] = pd.Series(self.butterworth_low_pass(acc_y_data, 5.0, 200.0, 4)).dropna()
acc_data['fz'] = pd.Series(self.butterworth_low_pass(acc_z_data, 5.0, 200.0, 4)).dropna()
            gyr_data['fx'] = pd.Series(gyr_x_data)
            gyr_data['fy'] = pd.Series(gyr_y_data)
            gyr_data['fz'] = pd.Series(gyr_z_data)
            # Assumption (illustrative): hand the prepared signal frames and the activity label back to the caller.
            return acc_data, gyr_data, self.label
        except (IOError, KeyError, pd.errors.ParserError) as err:
            # Assumption (illustrative): report unreadable or malformed files instead of crashing a batch run.
            print(f"Could not process {self.filepath + self.filename}: {err}")
            return None
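    # read_data() above relies on a Butterworth low-pass helper; the sketch below is a minimal
    # assumption, taking the call signature to be (signal, cutoff frequency in Hz, sampling rate
    # in Hz, filter order) and using the scipy.signal imports at the top of the file.
    def butterworth_low_pass(self, data, cutoff, fs, order):
        nyquist = 0.5 * fs                # Nyquist frequency of the sampled signal
        normal_cutoff = cutoff / nyquist  # scipy expects the cutoff normalised to [0, 1]
        b, a = butter(order, normal_cutoff, btype='low', analog=False)
        return lfilter(b, a, data)        # forward IIR filtering of the raw samples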
# -*- coding: utf-8 -*-
"""
Created on Mon 11 January 2022
Modified by <EMAIL> on 21/10/2021
@author: <NAME>
@contact: <EMAIL>
@license: /
"""
import mmap
import os
import sys
import numpy as np
import pandas as pd
from io import StringIO
from configparser import ConfigParser
class CustomParser(ConfigParser):
"""
Original from https://gitlab.cern.ch/datanaso/dEval/-/blob/master/etc/utils.py
ConfigParser with a custom dictionary conversion method.
"""
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(self._defaults, **d[k])
d[k].pop('__name__', None)
return d
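# Usage sketch for CustomParser (hypothetical header text, not taken from a real data file):
#
#     parser = CustomParser(strict=False)
#     parser.read_file(StringIO("[CHN1]\ncalfact=0.8\ncaloff=0.0\n"))
#     parser.as_dict()   # -> {'CHN1': {'calfact': '0.8', 'caloff': '0.0'}}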
class ProcessorBase():
'''
    Base-class for MR-ToF MS data processing
'''
def __init__(self):
self.files = []
self.data = {}
self.pars = {}
self.df_dict = {}
self.step = 0
def add_all(self, to_csv=False):
'''
Returns sum of all handled files while incrementing sweep numbers from one file to the next to
get a rolling sweep number.
Parameters:
- to_csv: File name to store combined .csv in
Return:
- flattened and sweep-adjusted dataframe
'''
if len(self.files) == 0:
print(f"(ProcessorBase.add_all): Data not processed yet or empty.")
return
# Adjust the sweep numbers
for i in np.arange(0, len(self.df_dict)):
if i == 0:
continue
key = list(self.df_dict.keys())[i]
key_m1 = list(self.df_dict.keys())[i-1]
self.df_dict[key]['sweep'] += self.df_dict[key_m1].iloc[-1]['sweep'] + 1
#
df = pd.concat(self.df_dict)
# Save to file if file name is passed
if to_csv != False:
df.to_csv(to_csv, index=False)
#
return df
class MCS6Lst(ProcessorBase):
'''
Process each list file contained in array of lst_files
Param:
lst_files: array of list files
'''
"""
Created on Mon 23 April 2018
Modified and adapted to Python 3 on Wed 17 July 2019
Modified by <EMAIL> on 21/10/2021
@author: <NAME>
@author: <NAME>
@contact: <EMAIL>
@contact: <EMAIL>
@license: MIT license
"""
def __init__(self):
"""
        Initialize the conversion dataframe and some other variables
"""
self.files = []
#-------------------------Create a dataframe containing the conversion table from the MCS6A manual--------------------------------------#
Time_Patch_Value = ['0', '5', '1', '1a', '2a', '22', '32','2','5b','Db','f3','43','c3','3']
conversion_dict = {'Data_Length' : pd.Series([2,4,4,6,6,6,6,6,8,8,8,8,8,8], index=Time_Patch_Value),
'Data_Lost_Bit' : pd.Series([np.nan ,np.nan ,np.nan ,np.nan ,np.nan ,np.nan ,47,np.nan ,63,np.nan ,47,63,np.nan ,63], index=Time_Patch_Value),
'Tag_Bits' : pd.Series([(np.nan,np.nan) ,(np.nan,np.nan) ,(np.nan,np.nan) ,(np.nan,np.nan) ,(40,47),(40,47),(np.nan,np.nan), (np.nan,np.nan), (48,62),(48,63),(48,63),(48,62),(48,63),(58,62)], index=Time_Patch_Value),
'Sweep_Counter': pd.Series([(np.nan,np.nan),(24,31),(np.nan,np.nan),(32,47),(32,39),(np.nan,np.nan),(40,46),(np.nan,np.nan),(32,47),(32,47),(40,46),(np.nan,np.nan),(np.nan,np.nan),(np.nan,np.nan)], index=Time_Patch_Value),
'Time_Bits': pd.Series([12,20,28,28,28,36,36,44,28,28,36,44,44,54], index=Time_Patch_Value),
'Max_Sweep_Length': pd.Series([0.0000004096,0.000105,0.027,0.027,0.027,6.872,6.872,1759.2,0.027,0.027,6.872,1759.2,1759.2,1801440], index=Time_Patch_Value)}
self.conversion_df = pd.DataFrame(conversion_dict)
self.df_dict = {}
def convert_bytes(self,bytearray,nb_bits,data_lost_bit,tag_bits,sweep_counter,time_bits,verbose=0):
'''
Perform the actual conversion of a single event from binary to integer numbers
See pages 5-19 and 5-20 of FastCom MCS6A for more information on the bits meaning
Param:
bytearray : an array of nb_bits/8 bytes encapsulating the data of a single MCS6A stop event
nb_bits : total number of bits on which the data is encoded
            data_lost_bit : Data lost bit. Indicates whether the FIFO was full. 1-bit index.
tag_bits : Tag bits. Tag info of a single stop event (see manual). Tuple containing the bit indexes.
sweep_counter: Sweep number of a single stop event. Tuple containing the bit indexes.
            time_bits : Number of bits encoding the time of flight of a single stop event.
The tof seems to be given in an unbinned format with 100ps resolution (to be confirmed).
Return:
Decoded tof, sweep, channel, edge, tag, fifo
'''
bit_word = ''
for bytes in reversed(bytearray):
bit_word += '{0:08b}'.format(ord(chr(bytes)))
#convert data lost bit always first index in the reversed array (last in the manual)
if np.isnan(data_lost_bit):
fifo = np.nan
index_data_lost_bit = -1
else:
index_data_lost_bit = nb_bits-1-int(data_lost_bit)
fifo = int(bit_word[index_data_lost_bit],2)
#convert tag bit
if np.isnan(tag_bits[0]):
tag = np.nan
index_high_tag = -1
index_low_tag = -1
else:
index_high_tag = nb_bits-1-int(tag_bits[1])
index_low_tag = nb_bits-1-int(tag_bits[0])
tag = int(bit_word[index_high_tag:index_low_tag+1],2)
#convert sweep number
if np.isnan(sweep_counter[0]):
sweep = np.nan
index_high_sweep = -1
index_low_sweep = -1
else:
index_high_sweep = nb_bits-1-int(sweep_counter[1])
index_low_sweep = nb_bits-1-int(sweep_counter[0])
sweep = int(bit_word[index_high_sweep:index_low_sweep+1],2)
if bit_word != "000000000000000000000000000000000000000000000000" and verbose>1:
print(f"bit_word: {bit_word}")
print(f"index_data_lost_bit: {fifo}")
print(f"index_high_tag: {index_high_tag}")
print(f"index_low_tag: {index_low_tag}")
print(f"tag: {tag}")
print(f"index_high_sweep: {index_high_sweep}")
print(f"index_low_sweep: {index_low_sweep}")
print(f"sweep: {sweep}")
#convert time of flight
index_high_tof = max(index_low_sweep,index_low_tag,index_data_lost_bit)+1
index_low_tof = index_high_tof+time_bits
tof = int(bit_word[index_high_tof:index_low_tof],2)
#these are always there no matter the format
channel = int(bit_word[index_low_tof+1:],2)
edge = int(bit_word[index_low_tof],2)
# if tof != 0:
# print(tof, sweep-1, channel, edge, tag, fifo)
return tof, sweep-1, channel, edge, tag, fifo
def decode_binary(self,binary,time_patch, verbose = 0):
'''
Read the binary part of the file by chunks and decode each chunk according to the format
given in time_patch
The length of a chunk is given by the time_patch
        Param : binary part of the memory-mapped list file
        Return: numpy array containing the converted data : tof, sweep, channel, edge, tag, fifo
'''
#-----------extract data from the dataframe--------------------------#
data_length = int(self.conversion_df.loc[time_patch.decode('ascii'),'Data_Length'])
nb_bits = 8*data_length #convert nb of bytes into nb of bits
data_lost_bit = self.conversion_df.loc[time_patch.decode('ascii'),'Data_Lost_Bit']
tag_bits = self.conversion_df.loc[time_patch.decode('ascii'),'Tag_Bits']
sweep_counter = self.conversion_df.loc[time_patch.decode('ascii'),'Sweep_Counter']
time_bits = int(self.conversion_df.loc[time_patch.decode('ascii'),'Time_Bits'])
max_sweep_length = self.conversion_df.loc[time_patch.decode('ascii'),'Max_Sweep_Length']
steps = len(binary[binary.tell():])/data_length
first_it = True
if verbose>1:
print(f"Data length: {data_length}\nN bits: {nb_bits}\nData lost bit: {data_lost_bit}\n\
tag bits: {tag_bits}\nsweep_counter: {sweep_counter}\ntime_bits: {time_bits}\n\
max sweep length: {max_sweep_length}\nsteps: {steps}\n")
# !
        # Introduce sweep_counter_overflow: in some cases, the MCS6 allocates only a small number of bits for storing the sweep number.
        # In time_patch=32 this is for example only 7 bits -> it can count to 128 and then resets to 0. With the overflow counter we
        # count how many overflows happen and add the necessary number of sweeps to the overall sweep number.
sweep_counter_overflow = 0
old_sweep = 0 # for detecting when overflow happens
# loop through all bytewords
for i in range(int(steps)):
if verbose>0:
if (i%(int(steps/10))==0):
print(f"Step {i} of {steps}.")
byteword = binary.read(data_length)
tof, sweep, channel, edge, tag, fifo = self.convert_bytes(byteword,nb_bits,
data_lost_bit, tag_bits, sweep_counter, time_bits, verbose=verbose)
# Check whether overflow happened (for example old_sweep = 127, new sweep is 0)
# Only do for non-zero events:
if tof != 0:
if verbose>1: print(f"old_sweep: {old_sweep}")
if old_sweep > sweep:
sweep_counter_overflow += 1
if verbose>1: print(f"sweep_counter_overflow: {sweep_counter_overflow}")
old_sweep = sweep
# Add overflow to the sweep number (in case sweep has 7bit int -> 2**7=128)
sweep += sweep_counter_overflow*(2**(sweep_counter[1]-sweep_counter[0]+1))
if verbose>1: print(f"sweep: {sweep}")
#
if channel != 0 :#means for real data
if first_it:
converted_data = np.array([tof, sweep, channel, edge, tag, fifo])
first_it = False
else :
converted_data = np.vstack((converted_data, np.array([tof, sweep, channel, edge, tag, fifo])))
binary.close()
return converted_data
def get_time_patch_and_binary(self, listfile, verbose=False):
'''
Memory map the list file and isolate the time_patch and the binary part of the data
Param:
listfile : input list file
Return:
            mapped_file : memory map of the input listfile
time_patch : string code indicating the format in which the data are written (see manual)
'''
mapped_file = mmap.mmap(listfile.fileno(), 0, access=mmap.ACCESS_READ)
search_dict = {'section' : '[DATA]' , 'list_file_type' : 'time_patch'}
#-----------------set file index to time patch code -----------------#
pos_type_from = mapped_file.find(search_dict['list_file_type'].encode('ascii'))+len(search_dict['list_file_type'])+1
mapped_file.seek(pos_type_from)
time_patch = mapped_file.readline().strip('\r\n'.encode('ascii'))
#-----------set file index to beginning of DATA-----------------------------------#
pos_data_from = mapped_file.find(search_dict['section'].encode('ascii'))
mapped_file.seek(pos_data_from)
        #---after this readline the file index should point to the beginning of the binary data---------#
mapped_file.readline()
if verbose>1:
print(f"pos_type_from: {pos_type_from}\npos_data_from: {pos_data_from}\ntime_patch: {time_patch}")
return mapped_file, time_patch
def process(self,file_array,to_csv = False, verbose=0):
"""
Perform the processing of the files
Parameters:
- file_array: Array of file-paths
- to_csv: if true, saves files under it's file name with .csv extension
- verbose: verbosity
"""
full_info = False # for regular application the channel, edge, tag and fifo info are constant so they don't have to be saved. In that case keep full_info = False
self.files = file_array
for filename in self.files:
with open(filename,'rb') as listfile:
binary, time_patch = self.get_time_patch_and_binary(listfile, verbose=verbose)
if full_info:
converted_data = self.decode_binary(binary,time_patch,verbose=verbose) # np.array with tof, sweep, channel, edge, tag, fifo
header_res ='tof,sweep,channel,edge,tag,fifo'
if to_csv:
np.savetxt('{}/{}.csv'.format(os.path.split(filename)[0],os.path.splitext(os.path.basename(filename))[0]),converted_data,
fmt = '%i,%i,%i,%i,%f,%f', header = header_res)
else:
converted_data = pd.DataFrame(self.decode_binary(binary,time_patch,verbose)[:, [0,1]], columns=['tof', 'sweep']) # saves only tof and sweep info
converted_data.tof = converted_data.tof/10 # 100ps -> ns
converted_data.sweep = converted_data.sweep.astype('int64') # sweep is only int
if to_csv:
converted_data.to_csv('{}/{}.csv'.format(os.path.split(filename)[0],os.path.splitext(os.path.basename(filename))[0]), index=False)
print('File {} loaded successfully!'.format(os.path.splitext(os.path.basename(filename))[0]))
self.df_dict[os.path.splitext(os.path.basename(filename))[0]] = converted_data
if full_info == False:
return(pd.concat(self.df_dict, axis=1)) # convert dict of dataframes into one dataframe with two column name levels
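# Usage sketch for MCS6Lst (the .lst paths are placeholders, not real files):
#
#     lst_processor = MCS6Lst()
#     tof_df = lst_processor.process(['run_001.lst', 'run_002.lst'], to_csv=False, verbose=0)
#     combined = lst_processor.add_all()   # single DataFrame with rolling sweep numbers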
class MPANTMpa(ProcessorBase):
"""
Original from https://gitlab.cern.ch/datanaso/dEval/-/blob/master/etc/mpant.py
Class handling the MPANT (mpa) data files!
Data format is 'asc'
"""
def __init__(self):
ProcessorBase.__init__(self) # self.map_files = file_array
def read(self, mpa):
"""
Read mpa data file
:return:
"""
with open(mpa, 'r') as f:
fs = f.read()
name = os.path.basename(mpa).split('.')[0]
raw_header, raw_data = fs.split('[DATA]\n')
if bool(raw_data) and len(raw_data.split(' ')) >= 9:
self.parse_header(name, raw_header)
self.df_dict[name] = pd.read_csv(StringIO(raw_data), delimiter=' ', usecols=(0, 1, 2), header=None, names=['tof', 'sweep', 'counts'])
self.data[name] = self.df_dict[name].to_numpy()
def parse_header(self, key, txt):
parser = CustomParser(strict=False)
parser.read_file(StringIO(txt))
tmp = parser.as_dict()
self.pars[key] = dict(**tmp['CHN1'])
self.pars[key].update(**tmp['MPA4A'])
def process(self, files, to_csv = False):
'''
Processes MPA file.
Parameters:
- files: array of files to be processed
- to_csv: Defaults to False; if true, saves the processed files
Return:
- pars: dictionary of measurement parameters
- data: 2D array of processed data
- data_dict: array of panda dataframes with data
'''
self.files = files
for i, f in enumerate(self.files):
self.read(f)
# Convert bins to tof in ns
name = os.path.basename(f).split('.')[0]
self.df_dict[name].tof = self.df_dict[name].tof * float(self.pars[name]['calfact']) + float(self.pars[name]['caloff'])
if to_csv:
self.df_dict[name].to_csv('{}/{}.csv'.format(os.path.split(f)[0],os.path.splitext(os.path.basename(f))[0]), index=False)
#
return self.pars, self.data, pd.concat(self.df_dict, axis=1)
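# Usage sketch for MPANTMpa (the .mpa path is a placeholder, not a real file):
#
#     mpa_processor = MPANTMpa()
#     pars, data, tof_df = mpa_processor.process(['scan_001.mpa'], to_csv=False)
#     # pars['scan_001'] then holds the [CHN1]/[MPA4A] header fields, e.g. 'calfact' and 'caloff'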
class MCDWIN887(ProcessorBase):
"""
Original from: https://gitlab.cern.ch/datanaso/dEval/-/blob/master/etc/mcdwin.py
Class handling the MCDWIN (.887) data files!
    Pass the .887 file; the data format ('csv' or 'asc') is handled automatically.
"""
def __init__(self):
ProcessorBase.__init__(self) # self.map_files = file_array
def get_signals(self):
return self.next_887, self.status_887
def read(self, p887):
"""
Read mpa data file
:return:
"""
self.parse_header(p887)
folder = os.path.dirname(p887) + os.path.sep
name = os.path.basename(p887).split('.')[0]
if self.pars[name]['sweepmode'] == '80' and self.pars[name]['fmt'] == 'csv':
self.df_dict[name] = self.read_csv_80(folder, name)
elif self.pars[name]['sweepmode'] == '84' and self.pars[name]['fmt'] == 'csv':
self.df_dict[name] = self.read_csv_84(folder, name)
elif self.pars[name]['sweepmode'] == '84' and self.pars[name]['fmt'] == 'asc':
self.df_dict[name] = self.read_asc_84(folder, name)
if self.df_dict[name]['counts'].sum() != 0:
self.data[name] = self.df_dict[name][self.df_dict[name]['counts'] > 0].to_numpy()
else:
self.data[name] = self.df_dict[name][0:1].to_numpy()
def read_csv_80(self, folder, name):
self.pars[name]['cycles'] = '1'
fname = folder + name + f'.{self.pars[name]["fmt"]}'
        df = pd.read_csv(fname, sep='\t', header=None, dtype=float, prefix='Y')
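        # Assumed continuation (illustrative only): reduce the raw MCDWIN table to the
        # (tof, sweep, counts) layout that read() expects; the exact column semantics of the
        # sweepmode '80' dump may differ in the original implementation.
        df['counts'] = df.sum(axis=1)                 # total counts per time bin
        df['sweep'] = 0                               # treated here as a single-sweep histogram
        df['tof'] = np.arange(len(df), dtype=float)   # bin index as an uncalibrated tof axis
        return df[['tof', 'sweep', 'counts']]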
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_usecols_index_col_conflict(self):
# see gh-4201: test that index_col as integer reflects usecols
data = 'a,b,c,d\nA,a,1,one\nB,b,2,two'
expected = DataFrame({'c': [1, 2]}, index=Index(
['a', 'b'], name='b'))
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'b': ['a', 'b'], 'c': [1, 2], 'd': ('one', 'two')})
expected = expected.set_index(['b', 'c'])
df = self.read_csv(StringIO(data), usecols=['b', 'c', 'd'],
index_col=['b', 'c'])
tm.assert_frame_equal(expected, df)
def test_usecols_implicit_index_col(self):
# see gh-2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# see gh-2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_integer_like_header(self):
data = """2,0,1
1000,2000,3000
4000,5000,6000
"""
usecols = [0, 1] # column selection by index
expected = DataFrame(data=[[1000, 2000],
[4000, 5000]],
columns=['2', '0'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['0', '1'] # column selection by name
expected = DataFrame(data=[[2000, 3000],
[5000, 6000]],
columns=['0', '1'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates(self):
# See gh-9755
s = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), usecols=[0, 2, 3],
parse_dates=parse_dates)
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
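# Editor's note: a minimal standalone sketch (not part of the original test
# module above) of the core `usecols` behaviour those tests exercise --
# selecting columns by position or by name yields the same frame.
import pandas as pd
from io import StringIO

csv_text = "a,b,c\n1,2,3\n4,5,6"
by_position = pd.read_csv(StringIO(csv_text), usecols=[1, 2])
by_name = pd.read_csv(StringIO(csv_text), usecols=["b", "c"])
assert by_position.equals(by_name)
print(by_position)   # columns 'b' and 'c' only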
"""
Same as simple_model_2, but uses an ensemble model.
"""
from sklearn_pandas import DataFrameMapper, CategoricalImputer
import logging
from ..Loader import Loader
from ..Paths import DICT_PATHS
import pandas as pd
from sklearn.pipeline import make_pipeline
from ..sk_util import CategoricalEncoder
from sklearn.preprocessing import Imputer, FunctionTransformer, StandardScaler
from sklearn.preprocessing import LabelBinarizer, OneHotEncoder
from sklearn_pandas import gen_features
loader = Loader()
df_train = loader.read_original_data(table_code='train')
df_test = loader.read_original_data(table_code='test')
# Consider only a subset of columns
df_train.set_index('PassengerId', inplace=True)
df_test.set_index('PassengerId', inplace=True)
USE_COLS = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
TARGET = ['Survived']
X_train = df_train[USE_COLS].copy()
y_train = df_train[TARGET].copy().values.reshape(-1,)
X_test = df_test[USE_COLS].copy()
# Preprocessing
# 1. 1-hot encode categorical columns
feature_cat = gen_features(columns=['Pclass', 'Sex', 'Embarked'],
classes=[CategoricalImputer, {'class': FunctionTransformer,
'func': pd.get_dummies,
'validate':False}]
)
feature_num = gen_features(columns=[['Age'], ['SibSp'], ['Parch'], ['Fare']],
classes=[Imputer, StandardScaler])
mapper = DataFrameMapper(
feature_cat + feature_num,
input_df=True, df_out=True)
X_train_fit = mapper.fit_transform(X_train.copy())
# Training
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier, BaggingClassifier, RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
import scipy
# Hyperparameters
param_grid_GB = {
'learning_rate': scipy.stats.uniform(loc=0.01, scale=0.15),
'max_depth': scipy.stats.randint(low=2, high=X_train_fit.shape[1]),
'min_samples_split': scipy.stats.uniform(loc=0.001, scale=0.02),
'min_samples_leaf': scipy.stats.uniform(loc=0.01, scale=0.2),
'subsample': scipy.stats.uniform(loc=0.5, scale=0.4),
'max_features': ['auto', 'sqrt', 'log2', None],
}
param_grid_LR = {
'penalty': ['l1', 'l2'],
'C': scipy.stats.expon(scale=10)
}
param_grid_LRCV = {}
param_RF = {
'n_estimators': scipy.stats.randint(5, 1000),
'max_features': ['auto', 'sqrt'],
'max_depth': scipy.stats.randint(10, 100),
'min_samples_leaf': scipy.stats.randint(1, 4),
'min_samples_split': scipy.stats.randint(2, 10),
'bootstrap': [True, False]
}
params = param_RF
model = RandomForestClassifier()
clf = RandomizedSearchCV(model, param_distributions=params, cv=5, verbose=1, n_jobs=-1, n_iter=200)
logging.info("Training...")
best_model = clf.fit(X_train_fit, y_train)
# Print best results (rank by high test score and low std)
train_result = | pd.DataFrame(clf.cv_results_) | pandas.DataFrame |
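# Editor's note: a hedged sketch of the ranking step announced in the comment
# above, once `train_result` holds clf.cv_results_. Column names follow the
# standard sklearn cv_results_ schema; the display choices are illustrative.
ranked = train_result.sort_values(by=["rank_test_score", "std_test_score"])
print(ranked[["rank_test_score", "mean_test_score", "std_test_score", "params"]].head(10))
print("Best params:", clf.best_params_)
print("Best CV accuracy: {:.4f}".format(clf.best_score_))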
import sys
import pandas as pd
import numpy as np
import pickle
import json
import util
def recommend_settings(model_dump, survey_json):
model = pickle.loads(model_dump)
survey_answers = pd.DataFrame(json.loads(survey_json)).T
measures = util.calc_measures_from_survey(survey_answers)
def search_settings_combinations(combinations, settings):
column = util.SETTING_COLUMNS[len(settings)]
for setting_value in util.SETTINGS_CATEGORIES[column]:
new_settings = settings.copy()
new_settings[column] = setting_value
if len(new_settings) == len(util.SETTING_COLUMNS):
combinations.append(new_settings)
else:
search_settings_combinations(combinations, new_settings)
settings_combinations = []
search_settings_combinations(settings_combinations, {})
predictions = | pd.DataFrame() | pandas.DataFrame |
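    # Editor's note: a hedged sketch of how this function could continue --
    # score every generated settings combination with the unpickled model and
    # return the best one. The feature layout expected by `model` is an
    # assumption here, not taken from the original source.
    for settings in settings_combinations:
        settings_df = pd.DataFrame([settings] * len(measures), index=measures.index)
        candidate = pd.concat([measures, settings_df], axis=1)
        score = float(model.predict(candidate).mean())
        predictions = pd.concat(
            [predictions, pd.DataFrame([{**settings, 'score': score}])],
            ignore_index=True,
        )
    best_settings = predictions.sort_values('score', ascending=False).iloc[0]
    return best_settings.drop('score').to_dict()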
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), | Timestamp("20130101 9:02") | pandas.Timestamp |
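# Editor's note: a minimal standalone sketch (separate from the pandas test
# suite excerpted above) of the Tick-offset arithmetic those tests exercise.
import pandas as pd

s = pd.Series([pd.Timestamp("20130101 9:01"), pd.Timestamp("20130101 9:02")])
print(s + pd.offsets.Milli(5))                          # adds 5 milliseconds
print(s + pd.offsets.Minute(5) + pd.offsets.Milli(5))   # offsets can be chained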
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Analyze SON scan csv file. You can run this as a script.
Optional argument of script is a slice (notation 0:) or
list of indices (comma-separated, e.g. 0,1,-2,-1).
Copyright <NAME> (2022) - Twitter: @hk_nien
License: MIT.
Created on Sat Feb 5 23:28:03 2022
"""
import sys
import os
from pathlib import Path
import re
import datetime
import pandas as pd
import numpy as np
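# Editor's note: a hedged sketch (not in the original excerpt) of parsing the
# optional script argument described in the module docstring -- either a slice
# such as "0:" or a comma-separated index list such as "0,1,-2,-1".
def parse_index_arg(arg):
    if ':' in arg:
        parts = [int(p) if p else None for p in arg.split(':')]
        return slice(*parts)
    return [int(p) for p in arg.split(',')]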
def _get_1csv_df(csv_fname):
"""Load csv, return df; handle data without api_version, all_slots column"""
df = pd.read_csv(csv_fname, comment='#')
if 'api_version' not in df.columns:
df['api_version'] = 1
if 'xfields' not in df.columns:
df['xfields'] = ''
else:
df.loc[df['xfields'].isna(), 'xfields'] = ''
if 'all_slots' not in df.columns:
df['all_slots'] = ''
else:
df.loc[df['all_slots'].isna(), 'all_slots'] = ''
return df
def get_csv_as_dataframe(csv_fname='data-son/son_scan-latest.csv'):
"""Load CSV file(s) and do minor preprocessing.
Parameters:
- csv_fname: CSV filename (str) or list of str.
Return:
- df: DataFrame with CSV contents; timestamps converted to pandas Timestamp.
- scan_times: list of scan start times (Timestamps). Use this for
slicing the DataFrame into separate scans.
Note: csv files will be put into chronological order, but it won't handle
overlapping ranges for 'scan_time'.
"""
if isinstance(csv_fname, (str, Path)):
csv_fnames = [csv_fname]
else:
csv_fnames = list(csv_fname)
df_list = [_get_1csv_df(fn) for fn in csv_fnames]
df_list = sorted(df_list, key=lambda df: df.iloc[0]['scan_time'])
df = pd.concat(df_list).reset_index().drop(columns='index')
df['scan_time'] = | pd.to_datetime(df['scan_time']) | pandas.to_datetime |
"""
Define a set of classes that function like forecasters, akin
to the R forecast package.
"""
import copy
import itertools
from typing import List, Tuple, Callable
import pandas as pd
import numpy as np
import scipy.linalg as spla
import tensorly as tl
from scipy.fftpack import rfft, irfft, dct
from tensorly.decomposition import parafac, tucker
from .utils import mad, multifold, rmse
TENSOR_MAX_ITER = 500
class ForecasterResult:
def __init__(
self,
inputs: pd.Series,
forecaster: "SeasonalForecaster",
in_sample_approx: pd.Series,
forecast: pd.Series,
in_errors: List[float],
out_errors: List[float],
nr_total_params: int,
):
self.inputs = inputs
self.forecaster = forecaster
self.in_sample_approx = in_sample_approx
self.forecast = forecast
self.in_errors = in_errors
self.out_errors = out_errors
self.nr_total_params = nr_total_params
class SeasonalForecaster:
def __init__(
self,
nr_params: int,
folds: Tuple[int],
error_callbacks: Tuple[Callable] = (rmse, mad),
):
"""
:param nr_params: number of parameters for the forecaster. specific definitions change
by forecaster.
:param folds: a tuple representing the folds of the seasonality. faster period comes first.
i.e., (24, 7) not (7, 24)
:param error_callbacks:
"""
self.nr_params = nr_params
self.folds = folds
self.error_callbacks = error_callbacks
self.nr_total_params = self.nr_params
def run_forecast(self, vals: pd.Series, nr_in_cycles: int) -> Tuple[np.ndarray, np.ndarray]:
raise NotImplementedError()
def __call__(self, vals: pd.Series, nr_in_cycles: int, **kwargs):
assert nr_in_cycles > 1, "number of cycles in sample must be > 1"
assert nr_in_cycles * np.prod(self.folds) > len(vals) / 2, (
"provide more data in sample then out of sample"
)
in_sample_approx, forecast = self.run_forecast(vals, nr_in_cycles)
nr_in_steps = int(nr_in_cycles * np.prod(self.folds))
data_in, data_out = (
vals.values[:nr_in_steps], vals.values[nr_in_steps:]
)
in_errors = [
callback(data_in, in_sample_approx) for callback in self.error_callbacks
]
out_errors = [
callback(data_out, forecast) for callback in self.error_callbacks
]
return ForecasterResult(
inputs=vals,
forecaster=self,
in_sample_approx= | pd.Series(in_sample_approx, index=vals.index[:nr_in_steps]) | pandas.Series |
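# Illustrative sketch only (not part of the original module): a minimal concrete
# forecaster on top of the SeasonalForecaster interface above. The class name and
# the seasonal-naive strategy are assumptions chosen for demonstration.
class SeasonalNaiveForecaster(SeasonalForecaster):
    """Repeat the mean in-sample seasonal profile as the forecast."""

    def run_forecast(self, vals: pd.Series, nr_in_cycles: int) -> Tuple[np.ndarray, np.ndarray]:
        period = int(np.prod(self.folds))
        nr_in_steps = nr_in_cycles * period
        data_in = vals.values[:nr_in_steps]
        # average each position within the seasonal cycle across the in-sample cycles
        profile = data_in.reshape(nr_in_cycles, period).mean(axis=0)
        in_sample_approx = np.tile(profile, nr_in_cycles)
        nr_out_steps = len(vals) - nr_in_steps
        forecast = np.tile(profile, int(np.ceil(nr_out_steps / period)))[:nr_out_steps]
        return in_sample_approx, forecast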
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import logging
import numpy as np
import pandas as pd
import utils.boxes as box_utils
import utils.keypoints as keypoint_utils
from core.config import cfg
from .json_dataset import JsonDataset
from datasets.json_dataset import JsonDataset
from .crop import crop_support
logger = logging.getLogger(__name__)
def combined_roidb_for_training(dataset_names, proposal_files):
"""Load and concatenate roidbs for one or more datasets, along with optional
object proposals. The roidb entries are then prepared for use in training,
which involves caching certain types of metadata for each roidb entry.
"""
def get_roidb(dataset_name, proposal_file):
ds = JsonDataset(dataset_name)
roidb = ds.get_roidb(
gt=True,
proposal_file=proposal_file,
crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH
)
if cfg.TRAIN.USE_FLIPPED:
logger.info('Appending horizontally-flipped training examples...')
extend_with_flipped_entries(roidb, ds)
logger.info('Loaded dataset: {:s}'.format(ds.name))
return roidb
if isinstance(dataset_names, six.string_types):
dataset_names = (dataset_names, )
if isinstance(proposal_files, six.string_types):
proposal_files = (proposal_files, )
if len(proposal_files) == 0:
proposal_files = (None, ) * len(dataset_names)
assert len(dataset_names) == len(proposal_files)
roidbs = [get_roidb(*args) for args in zip(dataset_names, proposal_files)]
original_roidb = roidbs[0]
# new dataset split according to class
roidb = []
for item in original_roidb:
gt_classes = list(set(item['gt_classes'])) # distinct gt classes
all_cls = np.array(item['gt_classes'])
for cls in gt_classes:
item_new = item.copy()
target_idx = np.where(all_cls == cls)[0] # array([ 0, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12]), first element in tuple
#item_new['id'] = item_new['id'] * 1000 + int(cls)
item_new['target_cls'] = int(cls)
item_new['boxes'] = item_new['boxes'][target_idx]
item_new['max_classes'] = item_new['max_classes'][target_idx]
item_new['gt_classes'] = item_new['gt_classes'][target_idx]
item_new['is_crowd'] = item_new['is_crowd'][target_idx]
item_new['segms'] = item_new['segms'][:target_idx.shape[0]]
item_new['seg_areas'] = item_new['seg_areas'][target_idx]
item_new['max_overlaps'] = item_new['max_overlaps'][target_idx]
item_new['box_to_gt_ind_map'] = np.array(range(item_new['gt_classes'].shape[0]))
item_new['gt_overlaps'] = item_new['gt_overlaps'][target_idx]
roidb.append(item_new)
for r in roidbs[1:]:
roidb.extend(r)
roidb = filter_for_training(roidb)
if cfg.TRAIN.ASPECT_GROUPING or cfg.TRAIN.ASPECT_CROPPING:
logger.info('Computing image aspect ratios and ordering the ratios...')
ratio_list, ratio_index, cls_list, id_list = rank_for_training(roidb)
logger.info('done')
else:
ratio_list, ratio_index, cls_list, id_list = None, None, None, None
logger.info('Computing bounding-box regression targets...')
add_bbox_regression_targets(roidb)
logger.info('done')
_compute_and_log_stats(roidb)
print(len(roidb))
return roidb, ratio_list, ratio_index, cls_list, id_list
def extend_with_flipped_entries(roidb, dataset):
"""Flip each entry in the given roidb and return a new roidb that is the
concatenation of the original roidb and the flipped entries.
"Flipping" an entry means that that image and associated metadata (e.g.,
ground truth boxes and object proposals) are horizontally flipped.
"""
flipped_roidb = []
for entry in roidb:
width = entry['width']
boxes = entry['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = width - oldx2 - 1
boxes[:, 2] = width - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
flipped_entry = {}
dont_copy = ('boxes', 'segms', 'gt_keypoints', 'flipped')
for k, v in entry.items():
if k not in dont_copy:
flipped_entry[k] = v
flipped_entry['boxes'] = boxes
#flipped_entry['segms'] = segm_utils.flip_segms(
# entry['segms'], entry['height'], entry['width']
#)
flipped_entry['segms'] = entry['segms']
if dataset.keypoints is not None:
flipped_entry['gt_keypoints'] = keypoint_utils.flip_keypoints(
dataset.keypoints, dataset.keypoint_flip_map,
entry['gt_keypoints'], entry['width']
)
flipped_entry['flipped'] = True
flipped_roidb.append(flipped_entry)
roidb.extend(flipped_roidb)
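# Worked example (illustrative numbers): with image width = 10, a box spanning
# x1 = 2 .. x2 = 5 flips to x1 = 10 - 5 - 1 = 4 and x2 = 10 - 2 - 1 = 7, i.e. the
# mirrored span under the 0-based, inclusive pixel coordinates used above.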
def filter_for_training(roidb):
"""Remove roidb entries that have no usable RoIs based on config settings.
"""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
if cfg.MODEL.KEYPOINTS_ON:
# If we're training for keypoints, exclude images with no keypoints
valid = valid and entry['has_visible_keypoints']
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
logger.info('Filtered {} roidb entries: {} -> {}'.
format(num - num_after, num, num_after))
return filtered_roidb
def rank_for_training(roidb):
"""Rank the roidb entries according to image aspect ration and mark for cropping
for efficient batching if image is too long.
Returns:
ratio_list: ndarray, list of aspect ratios from small to large
ratio_index: ndarray, list of roidb entry indices correspond to the ratios
"""
RATIO_HI = cfg.TRAIN.ASPECT_HI # largest ratio to preserve.
RATIO_LO = cfg.TRAIN.ASPECT_LO # smallest ratio to preserve.
need_crop_cnt = 0
ratio_list = []
cls_list = []
id_list = []
for entry in roidb:
width = entry['width']
height = entry['height']
ratio = width / float(height)
target_cls = entry['target_cls']
img_id = entry['id'] #int(str(entry['id'])[:-3])
if cfg.TRAIN.ASPECT_CROPPING:
if ratio > RATIO_HI:
entry['need_crop'] = True
ratio = RATIO_HI
need_crop_cnt += 1
elif ratio < RATIO_LO:
entry['need_crop'] = True
ratio = RATIO_LO
need_crop_cnt += 1
else:
entry['need_crop'] = False
else:
entry['need_crop'] = False
ratio_list.append(ratio)
cls_list.append(target_cls)
id_list.append(img_id)
if cfg.TRAIN.ASPECT_CROPPING:
logging.info('Number of entries that need to be cropped: %d. Ratio bound: [%.2f, %.2f]',
need_crop_cnt, RATIO_LO, RATIO_HI)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
cls_list = np.array(cls_list)
id_list = np.array(id_list)
return ratio_list[ratio_index], ratio_index, cls_list, id_list
def add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
for entry in roidb:
entry['bbox_targets'] = _compute_targets(entry)
def _compute_targets(entry):
"""Compute bounding-box regression targets for an image."""
# Indices of ground-truth ROIs
rois = entry['boxes']
overlaps = entry['max_overlaps']
labels = entry['max_classes']
gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
# Targets has format (class, tx, ty, tw, th)
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
if len(gt_inds) == 0:
# Bail if the image has no ground-truth ROIs
return targets
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = box_utils.bbox_overlaps(
rois[ex_inds, :].astype(dtype=np.float32, copy=False),
rois[gt_inds, :].astype(dtype=np.float32, copy=False))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
# Use class "1" for all boxes if using class_agnostic_bbox_reg
targets[ex_inds, 0] = (
1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
targets[ex_inds, 1:] = box_utils.bbox_transform_inv(
ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
return targets
def _compute_and_log_stats(roidb):
classes = roidb[0]['dataset'].classes
char_len = np.max([len(c) for c in classes])
hist_bins = np.arange(len(classes) + 1)
# Histogram of ground-truth objects
gt_hist = np.zeros((len(classes)), dtype=int)  # np.int is removed in modern NumPy
for entry in roidb:
gt_inds = np.where(
(entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
gt_classes = entry['gt_classes'][gt_inds]
gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
logger.debug('Ground-truth class histogram:')
for i, v in enumerate(gt_hist):
logger.debug(
'{:d}{:s}: {:d}'.format(
i, classes[i].rjust(char_len), v))
logger.debug('-' * char_len)
logger.debug(
'{:s}: {:d}'.format(
'total'.rjust(char_len), np.sum(gt_hist)))
def get_roidb_and_dataset(dataset_name, test_range):
"""
Get the roidb for the dataset specified in the global cfg. Optionally
restrict it to a range of indices if test_range is a pair of integers.
"""
dataset = JsonDataset(dataset_name)
original_roidb, roidb = dataset.get_roidb(gt=True, test_flag=True)
# construct support image crops with bounding box
support_roidb = []
cnt = 0
for item_id, item in enumerate(original_roidb):
gt_classes = list(set(item['gt_classes']))
for cls in gt_classes:
item_new = item.copy()
item_new['target_cls'] = int(cls)
all_cls = item['gt_classes']
target_idx = np.where(all_cls == cls)[0]
item_new['boxes'] = item['boxes'][target_idx]
item_new['gt_classes'] = item['gt_classes'][target_idx]
item_new['index'] = cnt
item_new['real_index'] = item_id
cnt += 1
support_roidb.append(item_new)
print('support annotation number: ', len(support_roidb))
roidb_img = []
roidb_cls = []
roidb_index = []
for item_id, item in enumerate(support_roidb):
roidb_img.append(item['image'])
roidb_cls.append(item['target_cls'])
roidb_index.append(item['index'])
assert item_id == item['index']
data_dict = {'img_ls': roidb_img, 'cls_ls': roidb_cls, 'index': roidb_index}
# construct dataframe for picking support images
support_df = | pd.DataFrame.from_dict(data_dict) | pandas.DataFrame.from_dict |
from .handler import function_handler
import yaml
import pytest
import pandas as pd
import numpy as np
from packaging import version
def transform_setup(function):
# read in file infos
with open("tests/test_yamls/test_transform.yml", "r") as stream:
file_infos = yaml.safe_load(stream)
if function == "decompress-content":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "decompress-content"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
# create input & output dfs
f = open(input_file, "rb")
binary_str = f.read()
input_df = pd.DataFrame({0: [binary_str]})
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df))
return transform_infos
if function == "transform-to-dataframe":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "transform-to-dataframe"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
str_type = info['str_type']
# create input & output dfs
input_df = pd.read_csv(input_file)
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df, str_type))
return transform_infos
if function == "split-dataframe-rows":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "split-dataframe-rows"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
# create input & output dfs
input_df = pd.read_csv(input_file, header=None)
output_dfs = [pd.read_csv(output_df) for output_df in info['output_files']]
transform_infos.append((input_df, output_dfs))
return transform_infos
if function == "flatten-lists-to-dataframe":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "flatten-lists-to-dataframe"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
extract_field = info['extract_field']
preserve_origin_data = info['preserve_origin_data']
# create input & output dfs
input_df = pd.read_csv(input_file)
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df, extract_field, preserve_origin_data))
return transform_infos
if function == "string-injecting":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "string-injecting"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
# create input & output dfs
input_df = pd.read_csv(input_file, names=[0, 'gem_name'])
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df))
return transform_infos
if function == "rename-columns":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "rename-columns"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
map_dict = info['rename_map']
# create input & output dfs
input_df = pd.read_csv(input_file)
output_df = pd.read_csv(output_file)
transform_infos.append((input_df, output_df, map_dict))
return transform_infos
if function == "json-array-to-dataframe":
extract_infos = [file_info for file_info in file_infos if file_info['function'] == "json-array-to-dataframe"]
transform_infos = []
for info in extract_infos:
# get file infos
input_file = info['input_file']
output_file = info['output_file']
headers = info['headers']
extract_field = info['extract_field']
# create input & output dfs
input_df = | pd.read_csv(input_file) | pandas.read_csv |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series( | pd.date_range("20130101", periods=5) | pandas.date_range |
import pandas as pd
import abc
class BaseField(abc.ABC):
def __init__(self, dtype=None, admits_null=True, admits_empty=True):
self._admits_null = bool(admits_null)
self._admits_empty = bool(admits_empty)
self._dtype = dtype
@property
def dtype(self):
return self._dtype
@property
def nullable(self):
"""
True if the field admits null values
"""
return self._admits_null
@property
def admits_empty(self):
return self._admits_empty
@abc.abstractmethod
def parse(self, value):
"""
Returns the actual value of the field
that should be stored in the data source.
Can also do some sort of validation.
"""
pass
def __call__(self, value):
is_null = | pd.isnull(value) | pandas.isnull |
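# Illustrative sketch only (not part of the original module): a minimal concrete
# field built on BaseField. The class name and the coercion rule are assumptions;
# __call__ above already checks for null values before parse is invoked.
class IntegerField(BaseField):
    def __init__(self, admits_null=True, admits_empty=False):
        super().__init__(dtype="int64", admits_null=admits_null, admits_empty=admits_empty)

    def parse(self, value):
        # coerce the raw value to a plain int before it is stored in the data source
        return int(value)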
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
ImportExtinctionRecallTaskLog.py
Import, tabulate, and plot Extinction Recall 3 log data.
Created 1/3/19 by DJ.
Updated 1/10/19 by DJ - adjusted to new VAS logging format, added GetVasTypes.
Updated 1/11/19 by DJ - bug fixes, comments.
Updated 2/25/19 by DJ - renamed PostDummyRun to PostRun3, stopped assuming sound check response was a float.
Updated 4/12/19 by DJ - updated to work from command line and with new task version (adding Sound VAS).
Updated 5/2/19 by DJ - added function to write BIDs-formatted events files,
added --makeBids flag to argparser, added run & tEnd columns to dfBlock.
Updated 9/24/19 by DJ - accommodate ratingscales with no locked-in response, removed old/redundant mood VAS categorization.
"""
# Import packages
import time # for timing analyses
import numpy as np # for math
import pandas as pd # for tables
from matplotlib import pyplot as plt # for plotting
import ast # for parameter parsing
import re # for splitting strings
import argparse # for command-line arguments
from glob import glob # for finding files
import os # for handling paths
# Import full log (including keypresses)
def ImportExtinctionRecallTaskLog(logFile):
# === Read in PsychoPy log
# Log start
print('Reading file %s...'%logFile)
t = time.time()
# Load file
with open(logFile) as f:
allLines = f.read().splitlines(True)
# Set up outputs
dfKey = pd.DataFrame(columns=['t','key'])
dfDisp = pd.DataFrame(columns=['t','stim','CS'])
dfSync = pd.DataFrame(columns=['t','value'])
dfBlock = pd.DataFrame(columns=['tStart','tEnd','type','run'])
dfVas = pd.DataFrame(columns=['imageFile','CSplusPercent','type','name','rating','timeToFirstPress','RT','run','group','block','trial','tImage','tStart','tEnd'])
params = {}
iKey = 0;
iDisp = 0;
iSync = 0;
iVas = 0;
iBlock = -1;
run = 0; # 1-based numbering
group = 0
block = 0
trial = 0
isParams = False;
# Read each line
for line in allLines:
# split into parts
data = line.split()
# Find params
if 'START PARAMETERS' in line:
isParams = True;
elif 'END PARAMETERS' in line:
isParams = False;
# Parse params
elif isParams: # parse parameter
key = data[2][:-1] # name of parameter
if len(data)==4:
try:
params[key] = float(data[3]) # if it's a number, convert to a float
except ValueError:
params[key] = data[3] # otherwise, record the string
elif data[3].startswith("["):
params[key] = ast.literal_eval(''.join(data[3:])) # if the parameter is a list, make it a list variable
else:
params[key] = ' '.join(data[3:])
# Parse data
elif len(data)>2:
if data[2]=='Keypress:': # time and key pressed
dfKey.loc[iKey,'t'] = float(data[0])
dfKey.loc[iKey,'key'] = data[3]
iKey +=1;
elif data[2]=='Display': # time and stim presented
dfDisp.loc[iDisp,'t'] = float(data[0])
dfDisp.loc[iDisp,'stim'] = data[3]
if len(data)>4: # if a CS level is specified...
trial +=1
dfDisp.loc[iDisp,'CS'] = data[4] # log it
# set VAS stimulus and type
dfVas.loc[iVas,'tImage'] = dfDisp.loc[iDisp,'t']
dfVas.loc[iVas,'imageFile'] = dfDisp.loc[iDisp,'stim']
dfVas.loc[iVas,'CSplusPercent'] = int(dfDisp.loc[iDisp,'CS'][6:])
dfVas.loc[iVas,'type'] = dfBlock.loc[iBlock,'type']
iDisp +=1;
elif data[2]=='set': # message time and text
dfSync.loc[iSync,'t'] = float(data[0])
dfSync.loc[iSync,'value'] = float(data[-1])
iSync +=1;
elif data[2]=='=====' and data[3]=='START' and data[4]=='RUN':
run +=1
elif data[2]=='====' and data[3]=='START' and data[4]=='GROUP':
group = int(data[5][0])
elif data[2]=='===' and data[3]=='START' and data[4]=='BLOCK': # block start time
block = int(data[5][0])
trial = 0
iBlock +=1;
dfBlock.loc[iBlock,'tStart'] = float(data[0])
dfBlock.loc[iBlock,'run'] = run
elif data[2]=='===' and data[3]=='END' and data[4]=='BLOCK': # block end time
dfBlock.loc[iBlock,'tEnd'] = float(data[0])
elif data[2]=='bottomMsg:':
if 'AFRAID' in line:
dfBlock.loc[iBlock,'type'] = 'afraid'
elif 'SCREAM' in line:
dfBlock.loc[iBlock,'type'] = 'scream'
elif data[2]=='RatingScale': # VAS time, rating, RT
if "rating=" in line:
dfVas.loc[iVas,'tStart'] = dfDisp.loc[iDisp-1,'t']
dfVas.loc[iVas,'tEnd'] = float(data[0])
dfVas.loc[iVas,'name'] = data[3][:-1]
value = float(data[-1].split("=")[-1])
dfVas.loc[iVas,'rating'] = value
# if it's an image vas, set indices
if dfVas.loc[iVas,'type'] in ['afraid','scream']:
dfVas.loc[iVas,'run'] = run
dfVas.loc[iVas,'group'] = group
dfVas.loc[iVas,'block'] = block
dfVas.loc[iVas,'trial'] = trial
# if the response timed out, advance without RT/history
if "timed out" in line:
dfVas.loc[iVas,'RT'] = np.nan;
# infer time to first keypress from
iKeys = np.where((dfKey.t>dfVas.loc[iVas,'tStart']) & (dfKey.key!=str(params['triggerKey'])[0]))[0]
if len(iKeys)>0:
dfVas.loc[iVas,'timeToFirstPress'] = dfKey.loc[iKeys[0],'t'] - dfVas.loc[iVas,'tStart'];
else:
dfVas.loc[iVas,'timeToFirstPress'] = np.nan;
if dfVas.loc[iVas,'type'] in ['afraid','scream']:
print('WARNING: image rating scale at t=%g (run %d group %d block %d trial %d) timed out! RT will be set to NaN, timeToFirstPress inferred from key-display interval.'%(dfVas.loc[iVas,'tStart'],run,group,block,trial))
else:
print('WARNING: mood rating scale at t=%g timed out! RT will be set to NaN, timeToFirstPress inferred from key-display interval.'%(dfVas.loc[iVas,'tStart']))
# increment VAS index
iVas +=1;
elif "RT=" in line:
value = float(data[-1].split("=")[-1])
dfVas.loc[iVas,'RT'] = value
elif "history=" in line:
# get time to first button presss
if len(re.split(r'\), |, |\)]',line))>3:
timeToPress = float(re.split(r'\), |, |\)]',line)[3])
else:
timeToPress = dfVas.loc[iVas,'RT'] # if no press, default to RT
dfVas.loc[iVas,'timeToFirstPress'] = timeToPress
# increment VAS index
iVas +=1;
print('Done! Took %.1f seconds.'%(time.time()-t))
print('Extracting VAS data...')
t = time.time()
# Parse out mood and sound VAS results
dfMoodVas = dfVas.loc[pd.isnull(dfVas['imageFile']),:]
dfMoodVas = dfMoodVas.drop(['imageFile','CSplusPercent','run','group','block','trial','tImage'],1)
# split into mood & sound
dfSoundVas = dfMoodVas.loc[dfMoodVas.name.str.startswith('SoundCheck'),:]
dfMoodVas = dfMoodVas.loc[~dfMoodVas.name.str.startswith('SoundCheck'),:]
# reset indices
dfSoundVas = dfSoundVas.reset_index(drop=True)
dfMoodVas = dfMoodVas.reset_index(drop=True)
# Parse out image VAS results
dfImageVas = dfVas.loc[pd.notnull(dfVas['imageFile']),:]
dfImageVas = dfImageVas.drop('name',1)
# add Mood VAS types
isTraining = 'Training' in logFile
dfMoodVas = GetVasTypes(params,dfMoodVas,isTraining)
# add Sound VAS types (assuming only one question per sound!!!)
dfSoundVas['group']=np.arange(dfSoundVas.shape[0])
dfSoundVas['groupName']=[x.split('-')[0] for x in dfSoundVas.name]
dfSoundVas['type']='loud'
print('Done! Took %.1f seconds.'%(time.time()-t))
# Return results
return params, dfMoodVas, dfSoundVas, dfImageVas, dfKey, dfDisp, dfSync, dfBlock
# Import VAS parts of log (excluding keypresses)
def ImportExtinctionRecallTaskLog_VasOnly(logFile):
# === Read in PsychoPy log
# Log start
print('Reading file %s...'%logFile)
t = time.time()
# Load file
with open(logFile) as f:
allLines = f.read().splitlines(True)
# Set up outputs
dfDisp = pd.DataFrame(columns=['t','stim','CS'])
dfBlock = pd.DataFrame(columns=['tStart','tEnd','type'])
dfVas = pd.DataFrame(columns=['imageFile','CSplusPercent','type','name','rating','timeToFirstPress','RT','run','group','block','trial','tImage','tStart','tEnd'])
params = {}
iDisp = 0;
iVas = 0;
iBlock = -1;
run = 0; # 1-based numbering
group = 0
block = 0
trial = 0
isParams = False;
# Read each line
for line in allLines:
# split into parts
data = line.split()
# Find params
if 'START PARAMETERS' in line:
isParams = True;
elif 'END PARAMETERS' in line:
isParams = False;
# Parse params
elif isParams: # parse parameter
key = data[2][:-1] # name of parameter
if len(data)==4:
try:
params[key] = float(data[3]) # if it's a number, convert to a float
except ValueError:
params[key] = data[3] # otherwise, record the string
elif data[3].startswith("["):
params[key] = ast.literal_eval(''.join(data[3:])) # if the parameter is a list, make it a list variable
else:
params[key] = ' '.join(data[3:])
# Parse data
elif len(data)>2:
if data[2]=='Display': # time and stim presented
dfDisp.loc[iDisp,'t'] = float(data[0])
dfDisp.loc[iDisp,'stim'] = data[3]
if len(data)>4: # if a CS level is specified...
trial +=1
dfDisp.loc[iDisp,'CS'] = data[4] # log it
# set VAS stimulus and type
dfVas.loc[iVas,'tImage'] = dfDisp.loc[iDisp,'t']
dfVas.loc[iVas,'imageFile'] = dfDisp.loc[iDisp,'stim']
dfVas.loc[iVas,'CSplusPercent'] = int(dfDisp.loc[iDisp,'CS'][6:])
dfVas.loc[iVas,'type'] = dfBlock.loc[iBlock,'type']
iDisp +=1;
elif data[2]=='=====' and data[3]=='START' and data[4]=='RUN':
run +=1
elif data[2]=='====' and data[3]=='START' and data[4]=='GROUP':
group = int(data[5][0])
elif data[2]=='===' and data[3]=='START' and data[4]=='BLOCK': # block start time
block = int(data[5][0])
trial = 0
iBlock +=1;
dfBlock.loc[iBlock,'tStart'] = float(data[0])
dfBlock.loc[iBlock,'run'] = run
elif data[2]=='===' and data[3]=='END' and data[4]=='BLOCK': # block end time
dfBlock.loc[iBlock,'tEnd'] = float(data[0])
elif data[2]=='bottomMsg:':
if 'AFRAID' in line:
dfBlock.loc[iBlock,'type'] = 'afraid'
elif 'SCREAM' in line:
dfBlock.loc[iBlock,'type'] = 'scream'
elif data[2]=='RatingScale': # VAS time, rating, RT
if "rating=" in line:
dfVas.loc[iVas,'tStart'] = dfDisp.loc[iDisp-1,'t']
dfVas.loc[iVas,'tEnd'] = float(data[0])
dfVas.loc[iVas,'name'] = data[3][:-1]
value = float(data[-1].split("=")[-1])
dfVas.loc[iVas,'rating'] = value
# if it's an image vas, set indices
if dfVas.loc[iVas,'type'] in ['afraid','scream']:
dfVas.loc[iVas,'run'] = run
dfVas.loc[iVas,'group'] = group
dfVas.loc[iVas,'block'] = block
dfVas.loc[iVas,'trial'] = trial
# if the response timed out, advance without RT/history
if "timed out" in line:
dfVas.loc[iVas,'RT'] = np.nan;
# NOTE: nan indicates unknown, not lack of keypress!
dfVas.loc[iVas,'timeToFirstPress'] = np.nan;
if dfVas.loc[iVas,'type'] in ['afraid','scream']:
print('WARNING: image rating scale at t=%g (run %d group %d block %d trial %d) timed out! RT and timeToFirstPress will be set to NaN.'%(dfVas.loc[iVas,'tStart'],run,group,block,trial))
else:
print('WARNING: mood rating scale at t=%g timed out! RT and timeToFirstPress will be set to NaN.'%(dfVas.loc[iVas,'tStart']))
# increment VAS index
iVas +=1;
elif "RT=" in line:
value = float(data[-1].split("=")[-1])
dfVas.loc[iVas,'RT'] = value
elif "history=" in line:
# get time to first button presss
if len(re.split(r'\), |, |\)]',line))>3:
timeToPress = float(re.split(r'\), |, |\)]',line)[3])
else:
timeToPress = dfVas.loc[iVas,'RT'] # if no press, default to RT
dfVas.loc[iVas,'timeToFirstPress'] = timeToPress
# increment VAS index
iVas +=1;
print('Done! Took %.1f seconds.'%(time.time()-t))
print('Extracting VAS data...')
t = time.time()
# Parse out mood and sound VAS results
dfMoodVas = dfVas.loc[pd.isnull(dfVas['imageFile']),:]
dfMoodVas = dfMoodVas.drop(['imageFile','CSplusPercent','run','group','block','trial','tImage'],1)
# split into mood & sound
dfSoundVas = dfMoodVas.loc[dfMoodVas.name.str.startswith('SoundCheck'),:]
dfMoodVas = dfMoodVas.loc[~dfMoodVas.name.str.startswith('SoundCheck'),:]
# reset indices
dfSoundVas = dfSoundVas.reset_index(drop=True)
dfMoodVas = dfMoodVas.reset_index(drop=True)
# Parse out image VAS results
dfImageVas = dfVas.loc[pd.notnull(dfVas['imageFile']),:]
dfImageVas = dfImageVas.drop('name',1)
# add Mood VAS types
isTraining = 'Training' in logFile
dfMoodVas = GetVasTypes(params,dfMoodVas,isTraining)
# add Sound VAS types (assuming only one question per sound!!!)
dfSoundVas['group']=np.arange(dfSoundVas.shape[0])
dfSoundVas['groupName']=[x.split('-')[0] for x in dfSoundVas.name]
dfSoundVas['type']='loud'
print('Done! Took %.1f seconds.'%(time.time()-t))
# Return results
return params, dfMoodVas, dfSoundVas, dfImageVas
# Add accurate group, groupName, and type columns to the dfMoodVas dataframe
def GetVasTypes(params,dfMoodVas,isTraining=False):
# declare constants
if isTraining:
vasGroups = ['PreRun1']
else:
vasGroups = ['PreSoundCheck','PostRun1','PostRun2','PostRun3']
magicWords = ['anxious','tired','worried are','mood','doing','feared']
# check each group file for magic words
for i,groupName in enumerate(vasGroups):
try:
vasFile = params['moodQuestionFile%d'%(i+1)]
print('reading %s...'%vasFile)
# read file to get list of questions
with open(vasFile,"r") as fi:
questions = []
for ln in fi:
if ln.startswith("?"):
questions.append(ln[1:])
for j,question in enumerate(questions):
isThis = dfMoodVas.name=='%s-%d'%(groupName,j)
dfMoodVas.loc[isThis,'group'] = i
dfMoodVas.loc[isThis,'groupName'] = groupName
for k,word in enumerate(magicWords):
if word in question:
dfMoodVas.loc[isThis,'type'] = word.split()[0]
except:
print('group %s not found.'%groupName)
return dfMoodVas # return modified dataframe
# Save figures of the image and mood VAS responses and RTs.
def SaveVasFigures(params,dfMoodVas,dfSoundVas,dfImageVas,outPrefix='ER3_'):
# Set up
outBase = os.path.basename(outPrefix) # filename without the folder
print('Plotting VAS data...')
t = time.time()
# === MOOD VAS === #
# Set up figure
moodFig = plt.figure(figsize=(8, 4), dpi=120, facecolor='w', edgecolor='k')
# Plot Ratings
plt.subplot(121)
# declare constants
if 'Training' in outPrefix:
vasGroups=['PreRun1']
else:
vasGroups = ['PreSoundCheck','PostRun1','PostRun2','PostRun3']
vasTypes = ['anxious','tired','worried','mood','doing','feared']
# vasTypes = dfMoodVas.type.unique()
for vasType in vasTypes:
isInType = dfMoodVas.type==vasType
plt.plot(dfMoodVas.loc[isInType,'group'],dfMoodVas.loc[isInType,'rating'],'.-',label=vasType)
plt.legend()
plt.xticks(range(len(vasGroups)),vasGroups)
plt.ylim([0,100])
plt.xticks(rotation=15)
plt.xlabel('group')
plt.ylabel('rating (0-100)')
plt.title('%s%d-%d\n Mood VAS Ratings'%(outBase,params['subject'],params['session']))
# Plot Reaction Times
plt.subplot(122)
for vasType in vasTypes:
isInType = dfMoodVas.type==vasType
plt.plot(dfMoodVas.loc[isInType,'group'],dfMoodVas.loc[isInType,'RT'],'.-',label=vasType)
plt.legend()
plt.xticks(range(len(vasGroups)),vasGroups)
plt.xticks(rotation=15)
plt.xlabel('group')
plt.ylabel('reaction time (s))')
plt.title('%s%d-%d\n Mood VAS RTs'%(outBase,params['subject'],params['session']))
# Save figure
outFile = '%s%d-%d_MoodVasFigure.png'%(outPrefix,params['subject'],params['session'])
print("Saving Mood VAS figure as %s..."%outFile)
moodFig.savefig(outFile)
# === SOUND CHECK VAS === #
# No sound checks in training task
if not 'Training' in outPrefix:
# declare constants
vasGroups = ['SoundCheck1','SoundCheck2','SoundCheck3']
vasTypes = ['loud']
# Set up figure
soundFig = plt.figure(figsize=(8, 4), dpi=120, facecolor='w', edgecolor='k')
# Plot Ratings
plt.subplot(121)
for vasType in vasTypes:
isInType = dfSoundVas.type==vasType
plt.plot(dfSoundVas.loc[isInType,'group'],dfSoundVas.loc[isInType,'rating'],'.-',label=vasType)
plt.legend()
plt.xticks(range(len(vasGroups)),vasGroups)
plt.ylim([0,100])
plt.xticks(rotation=15)
plt.xlabel('group')
plt.ylabel('rating (0-100)')
plt.title('%s subject %d session %d\n Sound VAS Ratings'%(outBase,params['subject'],params['session']))
# Plot Reaction Times
plt.subplot(122)
for vasType in vasTypes:
isInType = dfSoundVas.type==vasType
plt.plot(dfSoundVas.loc[isInType,'group'],dfSoundVas.loc[isInType,'RT'],'.-',label=vasType)
plt.legend()
plt.xticks(range(len(vasGroups)),vasGroups)
plt.xticks(rotation=15)
plt.xlabel('group')
plt.ylabel('reaction time (s))')
plt.title('%s subject %d session %d\n Sound VAS RTs'%(outBase,params['subject'],params['session']))
# Save figure
outFile = '%s%d-%d_SoundVasFigure.png'%(outPrefix,params['subject'],params['session'])
print("Saving Sound VAS figure as %s..."%outFile)
soundFig.savefig(outFile)
# === IMAGE VAS === #
# Plot image VAS results
imgFig = plt.figure(figsize=(8, 4), dpi=120, facecolor='w', edgecolor='k')
# Plot Ratings
plt.subplot(121)
vasTypes = dfImageVas.type.unique()
for vasType in vasTypes:
isInType = dfImageVas.type==vasType
plt.plot(dfImageVas.loc[isInType,'CSplusPercent'],dfImageVas.loc[isInType,'rating'],'.',label=vasType)
plt.legend()
plt.ylim([0,100])
plt.xticks(rotation=15)
plt.xlabel('CS plus level (%)')
plt.ylabel('rating (0-100)')
plt.title('%s%d-%d\n Image VAS Ratings'%(outBase,params['subject'],params['session']))
# Plot Reaction Times
plt.subplot(122)
for vasType in vasTypes:
isInType = dfImageVas.type==vasType
plt.plot(dfImageVas.loc[isInType,'CSplusPercent'],dfImageVas.loc[isInType,'RT'],'.',label=vasType)
plt.legend()
plt.xticks(rotation=15)
plt.xlabel('CS plus level (%)')
plt.ylabel('reaction time (s))')
plt.title('%s%d-%d\n Image VAS RTs'%(outBase,params['subject'],params['session']))
# Save figure
outFile = '%s%d-%d_ImageVasFigure.png'%(outPrefix,params['subject'],params['session'])
print("Saving Image VAS figure as %s..."%outFile)
imgFig.savefig(outFile)
print('Done! Took %.1f seconds.'%(time.time()-t))
# Convert mood VAS to a single line for logging to multi-subject spreadsheet
def GetSingleVasLine(params,dfVas,isTraining=False,isSoundVas=False):
# === Convert table to single line
# Declare names of VAS groups/types-within-groups (to be used in legends/tables)
if isTraining:
vasGroups = ['PreRun1']
else:
if isSoundVas:
vasGroups = ['SoundCheck1','SoundCheck2','SoundCheck3']
else:
vasGroups = ['PreSoundCheck','PostRun1','PostRun2','PostRun3'] # shorthand for each VAS group based on their position in the task
if isSoundVas:
vasTypes = ['loud']
else:
vasTypes = ['anxious','tired','worried','mood','doing','feared'] # shorthand for VAS0, VAS1, VAS2, etc.
# Convert
cols = ['subject','session','date']
for vasGroup in vasGroups:
for vasType in vasTypes:
cols = cols + ['%s_%s_rating'%(vasGroup,vasType)]
for vasGroup in vasGroups:
for vasType in vasTypes:
cols = cols + ['%s_%s_RT'%(vasGroup,vasType)]
# create dataframe
dfVas_singleRow = pd.DataFrame(columns=cols)
dfVas_singleRow.subject = dfVas_singleRow.subject.astype(int) # convert SDAN to integer
# dfVas_singleRow = dfVas_singleRow.set_index('subject') # set SDAN as index
dfVas_singleRow.loc[0,'subject'] = int(params['subject'])
dfVas_singleRow.loc[0,'session'] = int(params['session'])
dfVas_singleRow.loc[0,'date'] = params['date']
# Fill out single row
for vasGroup in vasGroups:
for vasType in vasTypes:
isThis = np.logical_and(dfVas.groupName==vasGroup, dfVas.type==vasType)
if np.any(isThis):
dfVas_singleRow.loc[0,'%s_%s_rating'%(vasGroup,vasType)] = dfVas.loc[isThis,'rating'].values[0]
dfVas_singleRow.loc[0,'%s_%s_RT'%(vasGroup,vasType)] = dfVas.loc[isThis,'RT'].values[0]
return dfVas_singleRow
# Write events to BIDS-formatted events files
def WriteBidsEventsFiles(dfDisp,dfKey,dfImageVas,dfBlock,subject,outFolder='./',isTraining=False):
# make sure times are floats, not strings
dfDisp['t'] = dfDisp['t'].astype(float)
dfKey['t'] = dfKey['t'].astype(float)
# Declare column names (based on BIDS specs)
colNames=['onset','duration','identifier','trial_type','stim_file','response'];
# Get stim durations
tDisp = dfDisp.t.values;
dfDisp['duration'] = 0;
dfDisp.duration.iloc[:-1] = tDisp[1:] - tDisp[:-1]
# find times of scan starts
tWaitForStarts = dfDisp.loc[dfDisp.stim=='WaitingForScanner','t'].values
print('Writing BIDS event files for %d runs...'%len(tWaitForStarts))
# create output directory
fileOutDir = os.path.join(outFolder,'sub-%05d'%subject,'func')
if not os.path.exists(fileOutDir):
os.makedirs(fileOutDir)
for iRun,tWait in enumerate(tWaitForStarts):
# get scan start and end time based on display
tStartScan = dfKey.loc[(dfKey.key=='5') & (dfKey.t>tWait),'t'].values[0]
tEndScan = dfBlock.loc[(dfBlock.run)==iRun+1,'tEnd'].values[-1]
# get events & info specifically in this scan
isInScan = (dfKey.t>tStartScan) & (dfKey.t<tEndScan)
dfKey_scan = dfKey[isInScan]
isInScan = (dfDisp.t>tStartScan) & (dfDisp.t<tEndScan)
dfDisp_scan = dfDisp[isInScan]
isInScan = (dfImageVas.tStart>tStartScan) & (dfImageVas.tStart<tEndScan)
dfImageVas_scan = dfImageVas[isInScan]
CSnum = dfImageVas_scan.CSplusPercent.values
blockType = dfImageVas_scan.type.values
# make dataframe for keypress events
dfEvents1 = pd.DataFrame(columns=colNames);
dfEvents1['onset'] = dfKey_scan.t-tStartScan
dfEvents1['identifier'] = ['key-down_%s'%x for x in dfKey_scan.key]
dfEvents1['duration'] = 0;
#dfEvents.loc[isImage,'trial_type'] = ['%s_CS%s'%(blockType[i],CSnum[i]) for i in range(len(CSnum))]
# make dataframe for display events
dfEvents2 = pd.DataFrame(columns=colNames);
dfEvents2['onset'] = dfDisp_scan.t-tStartScan
dfEvents2['duration'] = dfDisp_scan['duration']
dfEvents2['identifier'] = ['disp_%s'%x for x in dfDisp_scan.stim]
isImage = pd.notna(dfDisp_scan.CS)
isImageRating = dfDisp_scan.stim=='ImageRating0';
dfEvents2.loc[isImage,'trial_type'] = ['%s_CS-%s'%(blockType[i],CSnum[i]) for i in range(len(CSnum))]
dfEvents2.loc[isImageRating,'trial_type'] = ['%s_CS-%s'%(blockType[i],CSnum[i]) for i in range(len(CSnum))]
dfEvents2.loc[isImage,'stim_file'] = dfDisp_scan.loc[isImage,'stim']
dfEvents2.loc[isImageRating,'stim_file'] = dfDisp_scan.loc[isImage,'stim'].values
dfEvents2.loc[isImage,'identifier'] = 'disp_Face'
dfEvents2.loc[isImageRating,'identifier'] = 'disp_ImageRating'
dfEvents2.loc[isImageRating,'response'] = dfImageVas_scan.rating.values # rating final value
# dfEvents2.loc[isImageRating,'response_time'] = dfImageVas_scan.RT.values # rating final value
# combine dataframes
dfEvents = pd.concat((dfEvents1,dfEvents2))
# convert numbers to floats
dfEvents['onset'] = dfEvents['onset'].astype(float)
dfEvents['duration'] = dfEvents['duration'].astype(float)
dfEvents['response'] = dfEvents['response'].astype(float)
# sort events chronologically
dfEvents = dfEvents.sort_values('onset')
dfEvents = dfEvents.reset_index(drop=True)
# write to file
if isTraining:
fileOut = os.path.join(fileOutDir,'sub-%05d_task-ER3Training_run-%d_events.tsv'%(subject,iRun+1))
else:
fileOut = os.path.join(fileOutDir,'sub-%05d_task-ER3_run-%d_events.tsv'%(subject,iRun+1))
print('Writing BIDS-formatted events to %s...'%fileOut)
dfEvents.to_csv(fileOut,index=False,sep='\t',float_format='%.3f',na_rep='n/a')
print('Done!')
# Do everything: import the log, produce the figures, and produce the tables.
def ProcessERLog(logFilename,outFolder,makeBids=False):
# Get experiment type
isTraining = ('Training' in logFilename) # is it a training run?
# import data
if makeBids:
readParams,dfMoodVas,dfSoundVas,dfImageVas,dfKey,dfDisp,dfSync,dfBlock = ImportExtinctionRecallTaskLog(logFilename)
WriteBidsEventsFiles(dfDisp,dfKey,dfImageVas,dfBlock,readParams['subject'],outFolder,isTraining)
else:
readParams,dfMoodVas,dfSoundVas,dfImageVas = ImportExtinctionRecallTaskLog_VasOnly(logFilename)
# create output folder if it doesn't exist
subjOutFolder = os.path.join(outFolder,'%d'%(readParams['subject']))
if not os.path.exists(subjOutFolder):
os.makedirs(subjOutFolder)
# declare cross-subject table filenames
if isTraining: # if it's a training run
outMoodTable = os.path.join(outFolder,'ER3Training-MoodVasTable.xlsx')
subjOutPrefix = os.path.join(subjOutFolder,'ER3Training_')
else:
outMoodTable = os.path.join(outFolder,'ER3-MoodVasTable.xlsx')
outSoundTable = os.path.join(outFolder,'ER3-SoundVasTable.xlsx')
subjOutPrefix = os.path.join(subjOutFolder,'ER3_')
# make figures
SaveVasFigures(readParams,dfMoodVas,dfSoundVas,dfImageVas,subjOutPrefix)
# convert mood VAS to single line
dfMoodVas_singleRow = GetSingleVasLine(readParams,dfMoodVas,isTraining)
# Append output table to file
print("Appending to Mood VAS table %s..."%os.path.basename(outMoodTable))
if os.path.exists(outMoodTable):
dfMoodVas_all = pd.read_excel(outMoodTable,index_col=None)
dfMoodVas_all = dfMoodVas_all.append(dfMoodVas_singleRow)
dfMoodVas_all = dfMoodVas_all.drop_duplicates()
dfMoodVas_all.to_excel(outMoodTable,index=False)
else:
dfMoodVas_singleRow.to_excel(outMoodTable,index=False)
if not isTraining: # if it's not a training run
# convert sound VAS to single line
dfSoundVas_singleRow = GetSingleVasLine(readParams,dfSoundVas,isTraining,isSoundVas=True)
# Append output table to file
print("Appending to Sound VAS table %s..."%os.path.basename(outSoundTable))
if os.path.exists(outSoundTable):
dfSoundVas_all = | pd.read_excel(outSoundTable,index_col=None) | pandas.read_excel |
import pandas as pd
import sys
from Bio import SeqIO
sys.stderr = sys.stdout = open(snakemake.log[0], "w")
num_files = len(snakemake.input.annotations)
snakemake.output.taxid_db
annotations_id_dict = {}
annotations_name_dict = {}
# In annotations[0] if could be an empty string or the table provided by the user
if snakemake.input.annotations[0]:
annotations_df = pd.read_table(snakemake.input.annotations[0])
annotations_id_dict = annotations_df.set_index('protein_id').genome_id.to_dict()
if 'genome_name' in annotations_df.columns:
annotations_name_dict = annotations_df.set_index('protein_id').genome_name.to_dict()
# First there is always all_databases[0] because there is at least the perso database
parser_fasta = SeqIO.parse(snakemake.input.all_databases[0], 'fasta')
tmp_df = []
with open(snakemake.output.taxid_db, 'wt') as fasta_file:
for prot in parser_fasta:
length = len(prot.seq)
# Security to remove protein id from description
description = prot.description.replace(f"{prot.id} ", "")
description = description.replace(f"{prot.id}", "")
if annotations_name_dict:
genome_name = annotations_name_dict[prot.id]
else:
# if no genome name create a fake one
genome_name = ''
if annotations_id_dict:
genome_id = annotations_id_dict[prot.id]
protein_id = f"{prot.id}--{genome_id}"
else:
id_split = prot.id.split('--')
genome_id = id_split[1]
protein_id = prot.id
# Adding the information inside the datafame to be
tmp_df.append(
{
'protein_id':protein_id,
'genome_id':genome_id,
'length':length,
'protein_description':description,
'genome_name':genome_name,
}
)
prot.id = protein_id
prot.description = description
prot.name = ''
SeqIO.write(prot, fasta_file, 'fasta')
merge_df = pd.DataFrame(tmp_df)
if num_files == 2:
prot_taxid = snakemake.input.annotations[1]
taxid_df = pd.read_table(prot_taxid)
merge_df = | pd.concat([taxid_df, merge_df]) | pandas.concat |
#!/usr/bin/env python
import re, argparse, sys, os
import numpy as np
import pandas as pd
class MQcolnames(object):
def __init__(self, df):
self.columns = df.columns.tolist()
self.new2old_colnames_dict = {"Potential contaminant": "Contaminant",
"Modified sequence": "Modified Sequence",
"Raw file": "Raw File",
"Razor + unique peptides": "Razor + unique Peptides",
"Leading razor protein": "Leading Razor Protein"}
def check_if_colname_or_similar_exists(self, colname):
"""
return the correct column name or False
:param colname: String
:return: String or Boolean
"""
if colname in self.columns:
return colname
else:
if colname in self.new2old_colnames_dict:
return self.new2old_colnames_dict[colname]
else:
print("Column doesn't exist: {}".format(colname))
import ipdb
ipdb.set_trace()
return False
def add_num_MNQ_cols(df):
"""
add 3 columns with Integers to DataFrame, count the number of M, N and Q from Sequence column
:param df: DataFrame
:return: DataFrame
"""
aaseq2count_MNQ_dict = {}
aaseq_arr = df["Sequence"].unique()
for aaseq in aaseq_arr:
aaseq2count_MNQ_dict[aaseq] = count_MNQ(aaseq)
df['num_M'] = df["Sequence"].apply(lambda aaseq: aaseq2count_MNQ_dict[aaseq][0], 1)
df['num_N'] = df["Sequence"].apply(lambda aaseq: aaseq2count_MNQ_dict[aaseq][1], 1)
df['num_Q'] = df["Sequence"].apply(lambda aaseq: aaseq2count_MNQ_dict[aaseq][2], 1)
return df
def count_MNQ(aaseq):
M_count = aaseq.count('M')
N_count = aaseq.count('N')
Q_count = aaseq.count('Q')
return M_count, N_count, Q_count
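# Quick illustration of count_MNQ on a made-up sequence (residues are counted
# irrespective of any modification marks):
#   count_MNQ("MNQQAS") -> (1, 1, 2)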
def add_num_MNQ_ratios(df, mqc=None):
"""
add 4 columns to DataFrame: Int, Int, Float, Float
number of N modified to D and analogous Q to E,
the ratio of N / (N + N2D) and
analogous the ratio of Q / (Q + Q2E)
:param df: DataFrame
:param mqc: MQcolumns instance
:return: DataFrame
"""
if not mqc:
mqc = MQcolnames(df)
colname = mqc.check_if_colname_or_similar_exists("Modified sequence")
aaseq2count_MNQmod_dict = {}
aaseq_mod_arr = df[colname].unique()
for aaseq in aaseq_mod_arr:
aaseq2count_MNQmod_dict[aaseq] = count_MNQmod(aaseq)
df['num_OxM'] = df[colname].apply(lambda aaseq: aaseq2count_MNQmod_dict[aaseq][0], 1)
df['num_N2D'] = df[colname].apply(lambda aaseq: aaseq2count_MNQmod_dict[aaseq][1], 1)
df['num_Q2E'] = df[colname].apply(lambda aaseq: aaseq2count_MNQmod_dict[aaseq][2], 1)
# sanity check: "Deamidation (NQ)" == ( "num_N2D" + "num_Q2E" )
assert (df["Deamidation (NQ)"] == ( df["num_N2D"] + df["num_Q2E"] )).all()
df['ratio_OxM'] = df['num_OxM']*1.0 / df['num_M']
df['ratio_N2D'] = df['num_N2D']*1.0 / df['num_N']
df['ratio_Q2E'] = df['num_Q2E']*1.0 / df['num_Q']
return df
def count_MNQmod(aaseq, char2find=r"\(de\)|\(ox\)"):
"""
count the number of oxidized M, N modified to D and Q modified to E
e.g.
aaseq = r"_AAIAAFNAQ(de)N(de)N(de)GSNFQIEEISR_"
count_MNQmod(aaseq)
:param aaseq: String
:param char2find: String
:return: Tuple(Int, Int)
"""
MNQ_list = [aaseq[i.start() - 1] for i in re.finditer(char2find, aaseq)]
OxM_count = MNQ_list.count("M")
N2D_count = MNQ_list.count("N")
Q2E_count = MNQ_list.count("Q")
return OxM_count, N2D_count, Q2E_count
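# Quick illustration with a made-up modified sequence (only residues immediately
# followed by "(de)" or "(ox)" are counted):
#   count_MNQmod(r"_AAM(ox)IAAFN(de)AQN(de)GSR_") -> (1, 2, 0)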
def percent_deamidation(group, abundance_colname="Intensity"):
"""
calculate the deamidation rate (percentage) for N to D and Q to E;
D = number of Aspartic acids (mod. N)
N = number of Asparagines
abu = abundance
each per charge state
(D0*abu_0 + D1*abu_1 + D2*abu_2) / ((D0+N0)*abu_0 + (D1+N1)*abu_1 + (D2+N2)*abu_2) # long
(ratio_0*abu_0 + ratio_1*abu_1 + ratio_2*abu_2) / sum([abu_0, abu_1, abu_2]) # short
abundance_colname: "Intensity", "MS/MS Count"
ca. 14 times faster :) than conditional indexing
:param group: DataFrame(grouped)
:param abundance_colname: String
:return: DataFrame (grouped)
"""
group["perc_OxM"] = 100.0 * sum(group["ratio_OxM"] * group[abundance_colname]) / sum(group[abundance_colname])
group["perc_DeamN"] = 100.0 * sum(group["ratio_N2D"] * group[abundance_colname]) / sum(group[abundance_colname])
group["perc_DeamQ"] = 100.0 * sum(group["ratio_Q2E"] * group[abundance_colname]) / sum(group[abundance_colname])
return group
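# Numeric sketch with made-up values: a peptide observed in two charge states with
# N-deamidation ratios 0.2 and 0.5 and intensities 3e6 and 1e6 gives
#   perc_DeamN = 100 * (0.2*3e6 + 0.5*1e6) / (3e6 + 1e6) = 27.5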
def avg_PSM2Pep(df, colname_RawFile, colname_LeadingRazorProtein, mean_or_median="mean"):
"""
average PSMs to Peptides
:param df: DataFrame (with perc_DeamN col --> calc deamidation per RawFile, Sequence and Charge)
:param colname_RawFile: String
:param colname_LeadingRazorProtein: String
:param mean_or_median: String(Flag to select mean/average or median)
:return: DataFrame (with reduced shape)
"""
# subset columns needed
dfx = df[[colname_RawFile, colname_LeadingRazorProtein, "Sequence", "Charge", "perc_OxM", "perc_DeamN", "perc_DeamQ"]].drop_duplicates()
# average PSMs to Peptides (merge charge states of same sequence)
dfx = dfx.groupby([colname_RawFile, colname_LeadingRazorProtein, "Sequence"])[["perc_OxM", "perc_DeamN", "perc_DeamQ"]].agg(mean_or_median)
dfx = dfx.reset_index()
return dfx
def avg_Pep2Prot(df, colname_RawFile, colname_LeadingRazorProtein, mean_or_median="mean"):
"""
average Peptides to Proteins
:param df: DataFrame (with perc_DeamN col --> calc deamidation per RawFile, Sequence and Charge)
:param colname_RawFile: String
:param colname_LeadingRazorProtein: String
:param mean_or_median: String(Flag to select mean/average or median)
:return: DataFrame (with reduced shape)
"""
# averge Peptides to Proteins (merge sequences of same protein)
dfx = df.groupby([colname_RawFile, colname_LeadingRazorProtein])[["perc_OxM", "perc_DeamN", "perc_DeamQ"]].agg(mean_or_median)
dfx = dfx.reset_index()
return dfx
def yield_random_combination_with_replacement(iterable, n_times):
"""
should be even more memory efficient :)
yield n_times bootstrap samples: each sample draws len(iterable) elements
from iterable at random, with replacement
:param iterable: Iterable
:param n_times: Int (number of random combinations)
:return: Generator (yields ndarrays of length len(iterable))
"""
for _ in range(0, n_times):
yield np.random.choice(iterable, len(iterable), replace=True)
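# Usage sketch (illustrative values): draw three bootstrap resamples of a list of
# peptide indices; each yielded array has the same length as the input and is
# drawn with replacement.
#   for resample in yield_random_combination_with_replacement([10, 11, 12, 13], 3):
#       print(resample)  # e.g. array([11, 11, 13, 10])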
def bootstrap_RFgrouped_avg_Pep2Prot_avg_Prot(df, colname_LeadingRazorProtein, mean_or_median="mean"):
"""
calculate deamidation for N and Q by averaging Peptides to Proteins
and averaging Proteins
expects a DataFrame of one RawFile
:param df: DataFrame(evidence of one RawFile)
:param colname_LeadingRazorProtein: String
:param mean_or_median: String(Flag to select mean/average or median)
:return: Series
"""
dft = df.groupby(colname_LeadingRazorProtein)[["perc_OxM", "perc_DeamN", "perc_DeamQ"]].agg(mean_or_median)
return dft[["perc_OxM", "perc_DeamN", "perc_DeamQ"]].agg(mean_or_median)
def bootstrap_RFgrouped_avg_Pep(df, mean_or_median="mean"):
"""
calculate deamidation for M, N and Q by averaging Peptides directly
(without first collapsing Peptides to Proteins)
expects a DataFrame of one RawFile
:param df: DataFrame(evidence of one RawFile)
:param mean_or_median: String(Flag to select mean/average or median)
:return: Series
"""
return df[["perc_OxM", "perc_DeamN", "perc_DeamQ"]].agg(mean_or_median)
def run(fn_evidence, abundance_colname, ci, sampling, num_bootstraps, output_dir, colname_proteins, protein_bootstraps=False):
"""
:param fn_evidence: String (absolute path to 'evidence.txt' file)
:param abundance_colname: String (column name of abundance data e.g. 'Intensity')
:param ci: Integer (Confidence Interval e.g. 95)
:param sampling: String ("peptides" or "proteins")
:param num_bootstraps: Integer (Number of Bootstrap iterations e.g. 1000)
:param output_dir: String (absolute path to output directory)
:param colname_proteins: String (column name of Protein Accession Numbers/Identifierse.g. 'Leading razor protein')
:param protein_bootstraps: Boolean(flag to get additional output --> bootstrap Peptides per Protein per RawFile
(usually there are not enough data for this to be meaningful and thus this can lead to misleading results,
this is NOT recommended as a default analysis)
:return: None
"""
###### use a MaxQuant evidence.txt as input
## merging data per RawFile
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Reading file")
df = pd.read_csv(fn_evidence, sep='\t')
mqc = MQcolnames(df)
colname_proteins = mqc.check_if_colname_or_similar_exists(colname_proteins)
colname_RawFile = mqc.check_if_colname_or_similar_exists("Raw file")
print("Calculating N2D and Q2E ratios")
df = add_num_MNQ_cols(df)
df = add_num_MNQ_ratios(df)
## calc deamidation per RawFile, Sequence and Charge
df = df.groupby(by=[colname_RawFile, "Sequence", "Charge"], axis=0).apply(percent_deamidation, abundance_colname)
fn_out_num_peptides = os.path.join(output_dir, "Number_of_Peptides_per_RawFile.txt")
print("Writing {}".format(fn_out_num_peptides))
_ = number_of_N_Q_peptides_with_DeamPerc(df, colname_RawFile, fn_out_num_peptides)
### Protein level deamidation --> without CI
dfp = avg_PSM2Pep(df, colname_RawFile, colname_proteins)
dfp = avg_Pep2Prot(dfp, colname_RawFile, colname_proteins)
fn_out = os.path.join(output_dir, "Protein_deamidation.txt")
print("Writing {}".format(fn_out))
dfp.to_csv(fn_out, sep='\t', header=True, index=False)
print("Bootstrapping")
fn_out_bootstrapped = os.path.join(output_dir, "Bootstrapped_values.txt")
df_bs = deam_per_RawFile_returnBootstrappedVals(df, mqc, colname_proteins, sampling=sampling, num_bootstraps=num_bootstraps, groupby_=colname_RawFile, mean_or_median="mean", average_method="Peptides_per_groupby_")
df_bs = df_bs.reset_index(drop=True)
print("Writing results to: {}".format(fn_out_bootstrapped))
df_bs.to_csv(fn_out_bootstrapped, sep='\t', header=True, index=False)
fn_out = os.path.join(output_dir, "Deamidation.txt")
print("Writing {}".format(fn_out))
calculate_mean_and_CIs(df_bs, ci, colname_RawFile, fn_out)
if protein_bootstraps:
print("Calculating protein level deamidation per RawFile using bootstraps")
df_protein_boots = deam_per_RawFile_returnBootstrappedVals_bootstrapPeptidesPerProtein(df, mqc, colname_proteins=colname_proteins, num_bootstraps=num_bootstraps, groupby_=colname_RawFile)
fn_out_bootstrapped_proteins = os.path.join(output_dir, "Bootstrapped_values_proteins.txt")
print("Writing results to: {}".format(fn_out_bootstrapped_proteins))
df_protein_boots.to_csv(fn_out_bootstrapped_proteins, sep='\t', header=True, index=False)
fn_out_proteins = os.path.join(output_dir, "Deamidation_proteins.txt")
print("Writing {}".format(fn_out_proteins))
        cond_not_NaN = df_protein_boots["percDeam"].notnull()  # NaNs need to be removed for the CI calculation, but are kept in the deamidation values
calculate_mean_and_CIs_proteins(df_protein_boots[cond_not_NaN], ci, colname_RawFile, colname_proteins, fn_out_proteins)
fn_out_num_peptides_proteins = os.path.join(output_dir, "Number_of_Peptides_per_Protein_per_RawFile.txt")
print("Writing {}".format(fn_out_num_peptides_proteins))
_ = number_of_N_Q_peptides_with_DeamPerc_per_Protein(df, colname_RawFile, colname_proteins, fn_out_num_peptides_proteins)
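# Example invocation of run() (sketch only; the paths are placeholders and the column name
# assumes a standard MaxQuant evidence.txt):
# run(fn_evidence="/path/to/evidence.txt", abundance_colname="Intensity", ci=95,
#     sampling="peptides", num_bootstraps=1000, output_dir="/path/to/output",
#     colname_proteins="Leading razor protein", protein_bootstraps=False)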
def deam_per_RawFile_returnBootstrappedVals(df, mqc, colname_proteins="Leading razor protein", sampling="peptides", num_bootstraps=1000, groupby_="Raw file", mean_or_median="mean", average_method="Peptides_per_groupby_"):
"""
calculate deamidation-MEAN, but save values for BoxPlot
additionally calculate median and add to plot
first collapse PSMs to Peptides by averaging
    then either bootstrap-sample the Peptides,
    or collapse Peptides to Proteins and bootstrap the Proteins
:param df: DataFrame (evidence.txt with deam on PSM level)
:param mqc: MQcolnames Instance (check old vs. new colnames in tools-module)
:param colname_proteins: String (column name e.g. 'Leading razor protein')
:param sampling: String ("peptides" or "proteins")
:param num_bootstraps: Int
:param groupby_: String ('Raw file' or 'Experiment') --> if Experiment names are set
properly, then using Experiment will result in useful names for the plot
:param mean_or_median: String(Flag to select mean/average or median)
:param average_method: String(Flag to select average Peptides to Proteins and average Proteins per RawFile/Folder OR average Peptides per RawFile/Folder)
:return: DataFrame (Raw File, N_Q, percDeam, CI_low, CI_up)
"""
colname_RawFile = mqc.check_if_colname_or_similar_exists(groupby_)
colname_proteins = mqc.check_if_colname_or_similar_exists(colname_proteins)
# average PSMs to Peptides
dfx = avg_PSM2Pep(df, colname_RawFile, colname_proteins, mean_or_median=mean_or_median)
if sampling == "peptides":
# sampling Peptides for every RawFile
pass
elif sampling == "proteins":
# sampling proteins for every RawFile
# average Peptides to Proteins
dfx = avg_Pep2Prot(dfx, colname_RawFile, colname_proteins, mean_or_median=mean_or_median)
colname_RawFile = mqc.check_if_colname_or_similar_exists(groupby_)
grouped = dfx.groupby(colname_RawFile)
df_list = []
for name, group in grouped:
print(name)
df_temp = pd.DataFrame()
# df_temp[groupby_] = groupby_
group_index = group.index.tolist()
group_m = []
group_n = []
group_q = []
for random_index in yield_random_combination_with_replacement(group_index, num_bootstraps):
dft = dfx.loc[random_index, :]
### sanity check
cond = dft.index == random_index
assert cond.all() == True
### average Peptides to Proteins and average Proteins per RawFile/Folder
if average_method == "Peptides_2_Proteins_per_groupby_":
ser = bootstrap_RFgrouped_avg_Pep2Prot_avg_Prot(dft, colname_proteins, mean_or_median=mean_or_median)
### average Peptides per RawFile/Folder
elif average_method == "Peptides_per_groupby_":
ser = bootstrap_RFgrouped_avg_Pep(dft, mean_or_median=mean_or_median)
            else:
                raise ValueError("average_method '{}' does not exist".format(average_method))
group_m.append(ser["perc_OxM"])
group_n.append(ser["perc_DeamN"])
group_q.append(ser["perc_DeamQ"])
group_m = np.array(group_m)
group_n = np.array(group_n)
group_q = np.array(group_q)
group_m = group_m[np.isfinite(group_m)]
group_n = group_n[np.isfinite(group_n)]
group_q = group_q[np.isfinite(group_q)]
if group_m.size == 0:
group_m = np.nan
if group_n.size == 0:
group_n = np.nan
if group_q.size == 0:
group_q = np.nan
df_temp["perc_OxM"] = pd.Series(group_m)
df_temp["perc_DeamN"] = pd.Series(group_n)
df_temp["perc_DeamQ"] = pd.Series(group_q)
df_temp[groupby_] = name
df_list.append(df_temp)
dfm = pd.concat(df_list)
    dfm = pd.melt(dfm, id_vars=[groupby_], value_vars=["perc_OxM", "perc_DeamN", "perc_DeamQ"], var_name="MNQ", value_name="percDeam")
    return dfm
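# Note on the downstream confidence intervals (assumption: a percentile bootstrap is used): for a
# given RawFile and modification the CI bounds can be read off the bootstrap distribution, e.g.
# vals = dfm.loc[(dfm[groupby_] == name) & (dfm["MNQ"] == "perc_DeamN"), "percDeam"].dropna()
# ci_low, ci_up = np.percentile(vals, [(100 - ci) / 2.0, 100 - (100 - ci) / 2.0])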
import json
import sys
from functools import partial
import numpy as np
import pandas as pd
if sys.version_info[0] < 3:
pass
else:
pass
from tqdm import tqdm
def create_et_data(data, n_bins=5):
data["et_data"] = data['et_data'] \
.apply(str) \
.str.replace('$', ',', regex=False)
    et_indices = data[pd.notna(data['et_data'])].index
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
        # stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""") | pandas.compat.u |
import numpy as np
import pandas as pd
from .real_datasets import RealDataset
class KDDCup(RealDataset):
def __init__(self, seed):
super().__init__(
name="KDD Cup '99", raw_path='kddcup-data_10_percent_corrected.txt', file_name='kdd_cup.npz'
)
self.seed = seed
def load(self):
(a, b), (c, d) = self.get_data_dagmm()
self._data = (a, b, c, d)
def get_data_dagmm(self):
"""
This approach is used by the DAGMM paper (Zong et al., 2018) and was first described in Zhai et al.,
Deep structured energy based models for anomaly detection:
"As 20% of data samples are labeled as “normal” and the rest are labeled as “attack”, “normal” samples are in a
minority group; therefore, “normal” ones are treated as anomalies in this task" - Zong et al., 2018
"[...]in each run, we take 50% of data by random sampling for training with the rest 50% reserved for testing,
and only data samples from the normal class are used for training models.[...] - Zong et al., 2018"
:return: (X_train, y_train), (X_test, y_test)
"""
data = np.load(self.processed_path)
np.random.seed(self.seed)
labels = data['kdd'][:, -1]
features = data['kdd'][:, :-1]
normal_data = features[labels == 1]
normal_labels = labels[labels == 1]
attack_data = features[labels == 0]
attack_labels = labels[labels == 0]
n_attack = attack_data.shape[0]
rand_idx = np.arange(n_attack)
np.random.shuffle(rand_idx)
n_train = n_attack // 2
train = attack_data[rand_idx[:n_train]]
train_labels = attack_labels[rand_idx[:n_train]]
test = attack_data[rand_idx[n_train:]]
test_labels = attack_labels[rand_idx[n_train:]]
test = np.concatenate((test, normal_data), axis=0)
test_labels = np.concatenate((test_labels, normal_labels), axis=0)
return (pd.DataFrame(data=train), pd.DataFrame(data=train_labels)), (
pd.DataFrame(data=test), pd.DataFrame(data=test_labels))
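# Minimal usage sketch (illustrative only; assumes `kdd_cup.npz` already
# exists at `self.processed_path` and that `RealDataset.__init__` resolves it
# from the arguments above):
#
#     dataset = KDDCup(seed=2018)
#     dataset.load()
#     x_train, y_train, x_test, y_test = dataset._data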
import pandas as pd
import matplotlib.pyplot as plt
import main as mm
import work_wth_data as wd
def find_entry_date_range(filen):
"""input file or list, asks for descriptors, returns a list of lines"""
date_lower = input("Please enter a date LOWER BOUND (ex. 1-2-20, 10-21-19)(xxx to ignore): ")
date_higher = input("Please enter a date HIGHER BOUND (ex. 1-2-20, 10-21-19)(xxx to ignore): ")
check_string = "\n\n\nIs this Correct?\n----------------\n"
check_string += 'Date = "' + date_lower + '"\n'
check_string += 'Date = "' + date_higher + '"\n'
print(check_string)
confirmation = mm.get_yes_or_no("Please enter Y)es or N)o: ")
if confirmation == 'Y':
list_of_lines = []
if 'xxx' not in date_lower:
year_lower = int(date_lower.split('-')[2])
month_lower = int(date_lower.split('-')[0])
day_lower = int(date_lower.split('-')[1])
if 'xxx' not in date_higher:
year_higher = int(date_higher.split('-')[2])
month_higher = int(date_higher.split('-')[0])
day_higher = int(date_higher.split('-')[1])
for line in filen:
line_orig = line
line = line.split(',')
line_year = int(line[0].split('-')[2])
line_month = int(line[0].split('-')[0])
line_day = int(line[0].split('-')[1])
if 'xxx' not in date_lower:
if line_year < year_lower:
continue
if line_year == year_lower:
if line_month < month_lower:
continue
if line_month == month_lower:
if line_day < day_lower:
continue
if 'xxx' not in date_higher:
if line_year > year_higher:
continue
if line_year == year_higher:
if line_month > month_higher:
continue
if line_month == month_higher:
if line_day > day_higher:
continue
list_of_lines += [line_orig]
return list_of_lines
else:
return find_entry_date_range(filen)
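# Each entry line is expected to look like the CSV rows produced elsewhere in
# the app, e.g. (illustrative values only):
#
#     10-21-19,$12.50,coffee,Food,Card
#
# i.e. an M-D-YY date in the first field, which is what the '-' splitting
# above relies on.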
def total_for_date_range(s_or_r, first_time, listoas, old_listoas):
"""input 's' or 'r', first_time, list, and old list. Asks for date range, displays amount"""
if s_or_r == 's':
filepath = mm.get_user() + '/all.csv'
string = '\n\n\n\n\n\n\nSearch for Spent Range'
category_line = 'Date,Amount,Description,Category,Method of purchase'
else:
filepath = mm.get_user() + '/rec_all.csv'
string = '\n\n\n\n\n\n\nSearch for Received Range'
category_line = 'Date,Amount,Description,Category'
if first_time == True:
file = open(filepath, 'r')
print(string)
file.readline()
list_of_lines = find_entry_date_range(file)
print('\n|{:<6}'.format(0) + wd.display_line(category_line))
print(wd.display_many_lines(list_of_lines))
print('Total for Selection is ' + wd.format_dollars(wd.get_amount_for_list_of_lines(list_of_lines)) + '\n')
file.close()
choice = input('R)efine or D)one: ')
if choice.lower() == 'r':
return total_for_date_range(s_or_r, False, list_of_lines, list_of_lines)
elif choice.lower() == 'd':
print('Thanks!!\n\n\n\n\n')
return list_of_lines
else:
return total_for_date_range(s_or_r, 'r', list_of_lines, list_of_lines)
if first_time == 'r':
choice = input('R)efine or D)one: ')
if choice.lower() == 'r':
return total_for_date_range(s_or_r, False, listoas, old_listoas)
elif choice.lower() == 'd':
print('Thanks!!\n\n\n\n\n')
return listoas
else:
return total_for_date_range(s_or_r, 'r', listoas, old_listoas)
if first_time == False:
if s_or_r == 's':
string = '\n\n\n\n\n\n\nRefine Search for Spent Range'
else:
string = '\n\n\n\n\n\n\nRefine Search for Received Range'
print(string)
list_of_lines_new = find_entry_date_range(listoas)
print('\n|{:<6}'.format(0) + wd.display_line(category_line))
print(wd.display_many_lines(list_of_lines_new))
print('Total for Selection is ' + wd.format_dollars(wd.get_amount_for_list_of_lines(list_of_lines_new)) + '\n')
choice = input('R)efine, U)ndo, or D)one: ')
if choice.lower() == 'r':
return total_for_date_range(s_or_r, False, list_of_lines_new, listoas)
elif choice.lower() == 'd':
print('Thanks!!\n\n\n\n\n')
return list_of_lines_new
elif choice.lower() == 'u':
print('\n|{:<6}'.format(0) + wd.display_line(category_line))
print(wd.display_many_lines(listoas))
print('Total for Selection is ' + wd.format_dollars(wd.get_amount_for_list_of_lines(listoas)) + '\n')
return total_for_date_range(s_or_r, False, listoas, old_listoas)
else:
return total_for_date_range(s_or_r, 're', list_of_lines_new, listoas)
if first_time == 're':
choice = input('R)efine, U)ndo, or D)one: ')
if choice.lower() == 'r':
return total_for_date_range(s_or_r, False, listoas, old_listoas)
elif choice.lower() == 'd':
print('Thanks!!\n\n\n\n\n')
return listoas
elif choice.lower() == 'u':
print('\n|{:<6}'.format(0) + wd.display_line(category_line))
print(wd.display_many_lines(old_listoas))
print('Total for Selection is ' + wd.format_dollars(wd.get_amount_for_list_of_lines(old_listoas)) + '\n')
return total_for_date_range(s_or_r, False, old_listoas, old_listoas)
else:
return total_for_date_range(s_or_r, 're', listoas, old_listoas)
#--------------------------------------------
def get_s_or_r():
"""Gets the user to enter 's' or 'p'"""
answer = input("Please enter S)pent or R)eceived: ").lower()
if answer == 's':
return answer
if answer == 'r':
return answer
else:
print('Input not understood. Please try again.')
return get_s_or_r()
def listify_list_of_lines(list_of_lines):
"""takes a list of entry lines and turns each entry in to a sublist"""
final_list = []
counter = 0
for item in list_of_lines:
final_line = []
item = item[:-1]
item = item.split(',')
if counter != 0:
item[1] = float(item[1][1:])
for thing in item:
final_line += [thing]
final_list += [final_line]
counter += 1
return final_list
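# For example (illustrative input),
#     listify_list_of_lines(['Date,Amount,Description,Category\n',
#                            '10-21-19,$12.50,coffee,Food\n'])
# returns [['Date', 'Amount', 'Description', 'Category'],
#          ['10-21-19', 12.5, 'coffee', 'Food']]: the header row stays as
# strings and each data row's amount is converted to a float with the leading
# '$' stripped.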
def get_entries_for_display():
"""Retrieves and reaffirms date range data"""
s_or_r = get_s_or_r()
if s_or_r == 's':
category_line = 'Date,Amount,Description,Category,Method of purchase\n'
else:
category_line = 'Date,Amount,Description,Category\n'
list_of_lines = total_for_date_range(s_or_r, True, [], [])
check_string = "\n\n\nIs this Correct?\n----------------\n"
check_string += '|{:<6}'.format(0) + wd.display_line(category_line)
check_string += wd.display_many_lines(list_of_lines)
print(check_string)
print('Total for Selection is ' + wd.format_dollars(wd.get_amount_for_list_of_lines(list_of_lines)) + '\n')
confirmation = mm.get_yes_or_no("Please enter Y)es or N)o: ")
if confirmation == 'Y':
list_of_lines = [category_line] + list_of_lines
return listify_list_of_lines(list_of_lines)
else:
return get_entries_for_display()
def get_c_or_m():
"""Gets the user to enter 'c' or 'm'"""
answer = input("Please enter C)ategory or M)ethod of Purchase (for spent): ").lower()
if answer == 'c':
return answer
if answer == 'm':
return answer
else:
print('Input not understood. Please try again.')
return get_c_or_m()
def line_graph():
print("\nShows Line Graph of Amounts over time\n")
list_of_lines = get_entries_for_display()
first_date = list_of_lines[1][0]
last_date = list_of_lines[-1][0]
df = pd.DataFrame(list_of_lines[1:], columns=list_of_lines[0])
colors = ['green', 'red'] * len(list_of_lines)
fig = plt.figure('Line Graph from ' + first_date + ' to ' + last_date)
#-----------------------------------------------
ax1 = fig.add_subplot(111)
graph1 = df.groupby('Date', as_index=False, sort=False)['Amount'].sum()
ax = graph1.plot.line(x='Date', legend=None, ax=ax1, color=colors)
ax.set_xticks(range(len(graph1)))
ax.set_xticklabels([graph1["Date"][item] for item in graph1.index.tolist()], rotation=60)
plt.xlabel("Date")
plt.ylabel("Total Spent ($)")
plt.grid(axis='y')
plt.show()
def get_explode(data_frame):
"""Return an explode offset of 0.2 for every slice of the pie chart."""
return [0.2] * len(data_frame)
def pie_chart():
"""displays amounts by category, or methodofp in a pie chart"""
print("\nShows Pie Chart of Category, or Method of Purchase over time\n")
c_or_m = get_c_or_m()
list_of_lines = get_entries_for_display()
first_date = list_of_lines[1][0]
last_date = list_of_lines[-1][0]
df = pd.DataFrame(list_of_lines[1:], columns=list_of_lines[0])
if c_or_m == 'c':
fig_name = 'Categories'
column = 'Category'
else:
fig_name = 'Method of Purchase'
column = 'Method of purchase'
plt.figure('Pie Chart of ' + fig_name + ' from ' + first_date + ' to ' + last_date)
#-----------------------------------------------
graph1 = df.groupby(column, as_index=False)['Amount'].sum()
plt.pie(graph1['Amount'], labels=graph1[column],shadow=False,
# with one slide exploded out
explode=get_explode(graph1),
# with the start angle at 90%
startangle=90,
# with the percent listed as a fraction
autopct='%1.1f%%',
)
plt.show()
def bar_graph():
"""displays amount by category or methodofp in a bar graph"""
print("\nShows Bar Graph of Category, or Method of Purchase over time\n")
c_or_m = get_c_or_m()
list_of_lines = get_entries_for_display()
first_date = list_of_lines[1][0]
last_date = list_of_lines[-1][0]
df = pd.DataFrame(list_of_lines[1:], columns=list_of_lines[0])
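# A sketch of how bar_graph could continue, mirroring pie_chart() above
# (assumed, not the original implementation):
#
#     if c_or_m == 'c':
#         fig_name, column = 'Categories', 'Category'
#     else:
#         fig_name, column = 'Method of Purchase', 'Method of purchase'
#     plt.figure('Bar Graph of ' + fig_name + ' from ' + first_date + ' to ' + last_date)
#     graph1 = df.groupby(column, as_index=False)['Amount'].sum()
#     plt.bar(graph1[column], graph1['Amount'])
#     plt.ylabel('Total Spent ($)')
#     plt.show()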
"""This module contains PlainFrame and PlainColumn tests.
"""
import collections
import datetime
import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_equal as np_assert_equal
from pywrangler.util.testing.plainframe import (
NULL,
ConverterFromPandas,
NaN,
PlainColumn,
PlainFrame
)
@pytest.fixture
def plainframe_standard():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, 2, False, "string2", "2019-02-01 10:00:00"]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def plainframe_missings():
cols = ["int", "float", "bool", "str", "datetime"]
data = [[1, 1.1, True, "string", "2019-01-01 10:00:00"],
[2, NaN, False, "string2", "2019-02-01 10:00:00"],
[NULL, NULL, NULL, NULL, NULL]]
return PlainFrame.from_plain(data=data, dtypes=cols, columns=cols)
@pytest.fixture
def df_from_pandas():
df = pd.DataFrame(
{"int": [1, 2],
"int_na": [1, np.NaN],
"bool": [True, False],
"bool_na": [True, np.NaN],
"float": [1.2, 1.3],
"float_na": [1.2, np.NaN],
"str": ["foo", "bar"],
"str_na": ["foo", np.NaN],
"datetime": [pd.Timestamp("2019-01-01"), pd.Timestamp("2019-01-02")],
"datetime_na": [pd.Timestamp("2019-01-01"), pd.NaT]})
return df
@pytest.fixture
def df_from_spark(spark):
from pyspark.sql import types
values = collections.OrderedDict(
{"int": [1, 2, None],
"smallint": [1, 2, None],
"bigint": [1, 2, None],
"bool": [True, False, None],
"single": [1.0, NaN, None],
"double": [1.0, NaN, None],
"str": ["foo", "bar", None],
"datetime": [datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
None],
"date": [datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
None],
"map": [{"foo": "bar"}, {"bar": "foo"}, None],
"array": [[1, 2, 3], [3, 4, 5], None]}
)
data = list(zip(*values.values()))
c = types.StructField
columns = [c("int", types.IntegerType()),
c("smallint", types.ShortType()),
c("bigint", types.LongType()),
c("bool", types.BooleanType()),
c("single", types.FloatType()),
c("double", types.DoubleType()),
c("str", types.StringType()),
c("datetime", types.TimestampType()),
c("date", types.DateType()),
c("map", types.MapType(types.StringType(), types.StringType())),
c("array", types.ArrayType(types.IntegerType()))]
schema = types.StructType(columns)
return spark.createDataFrame(data, schema=schema)
def create_plain_frame(cols, rows, reverse_cols=False, reverse_rows=False):
"""Helper function to automatically create instances of PlainFrame.
`cols` contains typed column annotations like "col1:int".
"""
if reverse_cols:
cols = cols[::-1]
columns, dtypes = zip(*[col.split(":") for col in cols])
values = list(range(1, rows + 1))
mapping = {"str": list(map(str, values)),
"int": values,
"float": list(map(float, values)),
"bool": list([x % 2 == 0 for x in values]),
"datetime": ["2019-01-{:02} 10:00:00".format(x) for x in
values]}
data = [mapping[dtype] for dtype in dtypes]
data = list(zip(*data))
if reverse_rows:
data = data[::-1]
return PlainFrame.from_plain(data=data,
dtypes=dtypes,
columns=columns)
def create_plainframe_single(values, dtype):
"""Create some special scenarios more easily. Always assumes a single
column with identical name. Only values and dtype vary.
"""
data = [[x] for x in values]
dtypes = [dtype]
columns = ["name"]
return PlainFrame.from_plain(data=data, dtypes=dtypes, columns=columns)
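# For example (illustrative only), create_plain_frame(["a:int", "b:str"], 2)
# builds a two-row PlainFrame with columns a=(1, 2) and b=("1", "2"), while
# create_plainframe_single([1, NULL], "int") builds a single nullable column
# named "name" -- convenient shorthands used by the tests below.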
def test_plainframe():
# incorrect instantiation with non tuples with non factory method
plain_column = PlainColumn.from_plain(name="int",
dtype="int",
values=[1, 2, 3])
# correct instantiation
PlainFrame(plaincolumns=(plain_column,))
with pytest.raises(ValueError):
PlainFrame(plaincolumns=[plain_column])
with pytest.raises(ValueError):
PlainFrame(plaincolumns=[1])
def test_plainframe_from_plain_pandas_empty():
# tests GH#29
df = PlainFrame.from_plain(data=[], columns=["col1:int", "col2:str"])
col_values = lambda x: df.get_column(x).values
assert df.n_rows == 0
assert df.columns == ["col1", "col2"]
assert df.dtypes == ["int", "str"]
assert col_values("col1") == tuple()
assert col_values("col2") == tuple()
dfp = pd.DataFrame(columns=["col1", "col2"], dtype=int)
df = PlainFrame.from_pandas(dfp)
col_values = lambda x: df.get_column(x).values
assert df.n_rows == 0
assert df.columns == ["col1", "col2"]
assert df.dtypes == ["int", "int"]
assert col_values("col1") == tuple()
assert col_values("col2") == tuple()
def test_plainframe_attributes(plainframe_missings):
df = plainframe_missings
col_values = lambda x: df.get_column(x).values
assert df.columns == ["int", "float", "bool", "str", "datetime"]
assert df.dtypes == ["int", "float", "bool", "str", "datetime"]
assert col_values("int") == (1, 2, NULL)
assert col_values("str") == ("string", "string2", NULL)
assert col_values("datetime")[0] == datetime.datetime(2019, 1, 1, 10)
def test_plainframe_modify():
# change single value
df_origin = create_plainframe_single([1, 2], "int")
df_target = create_plainframe_single([1, 1], "int")
assert df_origin.modify({"name": {1: 1}}) == df_target
# change multiple values
df_origin = create_plainframe_single([1, 2], "int")
df_target = create_plainframe_single([3, 3], "int")
assert df_origin.modify({"name": {0: 3, 1: 3}}) == df_target
# change multiple columns
df_origin = PlainFrame.from_plain(data=[[1, 2], ["a", "b"]],
dtypes=["int", "str"],
columns=["int", "str"],
row_wise=False)
df_target = PlainFrame.from_plain(data=[[1, 1], ["a", "a"]],
dtypes=["int", "str"],
columns=["int", "str"],
row_wise=False)
assert df_origin.modify({"int": {1: 1}, "str": {1: "a"}}) == df_target
def test_plainframe_modify_assertions():
# check incorrect type conversion
df = create_plainframe_single([1, 2], "int")
with pytest.raises(TypeError):
df.modify({"name": {0: "asd"}})
def test_plainframe_getitem_subset():
df = create_plain_frame(["col1:str", "col2:int", "col3:int"], 2)
df_sub = create_plain_frame(["col1:str", "col2:int"], 2)
cmp_kwargs = dict(assert_column_order=True,
assert_row_order=True)
# test list of strings, slice and string
df["col1", "col2"].assert_equal(df_sub, **cmp_kwargs)
df["col1":"col2"].assert_equal(df_sub, **cmp_kwargs)
df["col1"].assert_equal(df_sub["col1"], **cmp_kwargs)
# test incorrect type
with pytest.raises(ValueError):
df[{"col1"}]
# test invalid column name
with pytest.raises(ValueError):
df["non_existant"]
def test_plainframe_get_column():
df = create_plain_frame(["col1:str", "col2:int"], 2)
assert df.get_column("col1") is df.plaincolumns[0]
# check value error for non existent column
with pytest.raises(ValueError):
df.get_column("does_not_exist")
def test_plainframe_parse_typed_columns():
parse = PlainFrame._parse_typed_columns
# invalid splits
cols = ["col1:int", "col2"]
with pytest.raises(ValueError):
parse(cols)
# invalid types
cols = ["col1:asd"]
with pytest.raises(ValueError):
parse(cols)
# invalid abbreviations
cols = ["col1:a"]
with pytest.raises(ValueError):
parse(cols)
# correct types and columns
cols = ["col1:str", "col2:s",
"col3:int", "col4:i",
"col5:float", "col6:f",
"col7:bool", "col8:b",
"col9:datetime", "col10:d"]
names = ["col{}".format(x) for x in range(1, 11)]
dtypes = ["str", "str",
"int", "int",
"float", "float",
"bool", "bool",
"datetime", "datetime"]
result = (names, dtypes)
np_assert_equal(parse(cols), result)
def test_plainframe_from_plain():
# unequal elements per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1]],
columns=["a", "b"],
dtypes=["int", "int"])
# mismatch between number of columns and entries per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a"],
dtypes=["int", "int"])
# mismatch between number of dtypes and entries per row
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int"])
# incorrect dtypes
with pytest.raises(ValueError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "bad_type"])
# type errors conversion
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "str"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "bool"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[["1", 2],
["1", 2]],
columns=["a", "b"],
dtypes=["float", "int"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[["1", 2],
["1", 2]],
columns=["a", "b"],
dtypes=["str", "str"])
with pytest.raises(TypeError):
PlainFrame.from_plain(data=[[True, 2],
[False, 2]],
columns=["a", "b"],
dtypes=["datetime", "int"])
# correct implementation should not raise
PlainFrame.from_plain(data=[[1, 2],
[1, 2]],
columns=["a", "b"],
dtypes=["int", "int"])
def test_plainframe_to_plain():
columns = dtypes = ["int", "float", "bool", "str"]
data = [[1, 1.1, True, "string"],
[2, 2, False, "string2"]]
pf = PlainFrame.from_plain(data=data, columns=columns, dtypes=dtypes)
expected = (data, columns, dtypes)
assert pf.to_plain() == expected
def test_plainframe_from_dict():
data = collections.OrderedDict(
[("col1:int", [1, 2, 3]),
("col2:s", ["a", "b", "c"])]
)
df = PlainFrame.from_dict(data)
# check correct column order and dtypes
np_assert_equal(df.columns, ("col1", "col2"))
np_assert_equal(df.dtypes, ["int", "str"])
# check correct values
np_assert_equal(df.get_column("col1").values, (1, 2, 3))
np_assert_equal(df.get_column("col2").values, ("a", "b", "c"))
def test_plainframe_to_dict():
df = create_plain_frame(["col2:str", "col1:int"], 2)
to_dict = df.to_dict()
keys = list(to_dict.keys())
values = list(to_dict.values())
# check column order and dtypes
np_assert_equal(keys, ["col2:str", "col1:int"])
# check values
np_assert_equal(values[0], ["1", "2"])
np_assert_equal(values[1], [1, 2])
def test_plainframe_from_pandas(df_from_pandas):
df = df_from_pandas
df_conv = PlainFrame.from_pandas(df)
# check int to int
assert df_conv.get_column("int").dtype == "int"
assert df_conv.get_column("int").values == (1, 2)
# check bool to bool
assert df_conv.get_column("bool").dtype == "bool"
assert df_conv.get_column("bool").values == (True, False)
# check bool (object) to bool with nan
assert df_conv.get_column("bool_na").dtype == "bool"
assert df_conv.get_column("bool_na").values == (True, NULL)
# check float to float
assert df_conv.get_column("float").dtype == "float"
assert df_conv.get_column("float").values == (1.2, 1.3)
# check float to float with nan
assert df_conv.get_column("float_na").dtype == "float"
np_assert_equal(df_conv.get_column("float_na").values, (1.2, NaN))
# check str to str
assert df_conv.get_column("str").dtype == "str"
assert df_conv.get_column("str").values == ("foo", "bar")
# check str to str with nan
assert df_conv.get_column("str_na").dtype == "str"
assert df_conv.get_column("str_na").values == ("foo", NULL)
# check datetime to datetime
assert df_conv.get_column("datetime").dtype == "datetime"
assert df_conv.get_column("datetime").values == \
(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2))
# check datetime to datetime with nan
assert df_conv.get_column("datetime_na").dtype == "datetime"
assert df_conv.get_column("datetime_na").values == (
datetime.datetime(2019, 1, 1), NULL)
def test_plainframe_from_pandas_assertions_missings_cast():
# check mixed dtype raise
df = pd.DataFrame({"mixed": [1, "foo bar"]})
with pytest.raises(TypeError):
PlainFrame.from_pandas(df)
# check assertion for incorrect forces
# too many types provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes=["int", "str"])
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"mixed": "str",
"dummy": "int"})
# invalid dtypes provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes=["not existant type"])
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"mixed": "not existant type"})
# invalid column names provided
with pytest.raises(ValueError):
PlainFrame.from_pandas(df, dtypes={"dummy": "str"})
# check int to forced int with nan
df = pd.DataFrame({"int": [1, np.NaN]})
df_conv = PlainFrame.from_pandas(df, dtypes=["int"])
assert df_conv.get_column("int").dtype == "int"
assert df_conv.get_column("int").values == (1, NULL)
# check force int to float
df = pd.DataFrame({"int": [1, 2]})
df_conv = PlainFrame.from_pandas(df, dtypes=["float"])
assert df_conv.get_column("int").dtype == "float"
assert df_conv.get_column("int").values == (1.0, 2.0)
# check force float to int
df = | pd.DataFrame({"float": [1.0, 2.0]}) | pandas.DataFrame |
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from operator import itemgetter
import pandas as pd
import pickle
from scipy import sparse
from scipy.spatial.distance import cosine
import seaborn
from sklearn import preprocessing
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
# ------------------------------------------------------------------------------
class Preprocess():
def __init__(self):
print('preprocessing')
self.df = pd.DataFrame()
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python Client API unit tests.
Unit tests for core methods in the Data Commons Python Client API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pandas.util.testing import assert_series_equal, assert_frame_equal
from unittest import mock
import datacommons as dc
import datacommons.utils as utils
import pandas as pd
import json
import unittest
def post_request_mock(*args, **kwargs):
""" A mock POST requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
# Get the request json
req = kwargs['json']
headers = kwargs['headers']
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for post requests to get_property_labels.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_labels']:
if req['dcids'] == ['geoId/0649670']:
# Response for sending a single dcid to get_property_labels
out_arcs = ['containedInPlace', 'name', 'geoId', 'typeOf']
res_json = json.dumps({
'geoId/0649670': {
'inLabels': [],
'outLabels': out_arcs
}
})
return MockResponse({"payload": res_json}, 200)
elif req['dcids'] == ['State', 'County', 'City']:
# Response for sending multiple dcids to get_property_labels
in_arcs = ['typeOf']
out_arcs = ['name', 'provenance', 'subClassOf', 'typeOf', 'url']
res_json = json.dumps({
'City': {'inLabels': in_arcs, 'outLabels': out_arcs},
'County': {'inLabels': in_arcs, 'outLabels': out_arcs},
'State': {'inLabels': in_arcs, 'outLabels': out_arcs}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == ['dc/MadDcid']:
# Response for sending a dcid that doesn't exist to get_property_labels
res_json = json.dumps({
'dc/MadDcid': {
'inLabels': [],
'outLabels': []
}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == []:
# Response for sending no dcids to get_property_labels
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_property_values
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_values']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'containedInPlace'\
and req['value_type'] == 'Town':
# Response for sending a request for getting Towns containedInPlace of
# Santa Clara County and Montgomery County.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': 'Los Gatos',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
{
'dcid': 'geoId/0643294',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
}
],
'out': []
},
'geoId/24031': {
'in': [
{
'dcid': 'geoId/2462850',
'name': 'Poolesville',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'name':
# Response for sending a request for the name of multiple dcids.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': [
{
'value': 'Santa Clara County',
'provenanceId': 'dc/sm3m2w3',
},
]
},
'geoId/24031': {
'in': [],
'out': [
{
'value': 'Montgomery County',
'provenanceId': 'dc/sm3m2w3',
},
]
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'madProperty':
# Response for sending a request with a property that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': []
},
'geoId/24031': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']\
and req['property'] == 'containedInPlace':
# Response for sending a request with a single dcid that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': 'Los Gatos',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
},
'dc/MadDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': {
'in': [],
'out': []
},
'dc/MadderDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == [] and req['property'] == 'containedInPlace':
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_triples
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_triples']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']:
# Response for sending a request with two valid dcids.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'geoId/24031': [
{
"subjectId": "geoId/24031",
"predicate": "name",
"objectValue": "Montgomery County"
},
{
"subjectId": "geoId/2467675",
"subjectName": "Rockville",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/24031",
"objectName": "Montgomery County"
},
{
"subjectId": "geoId/24031",
"predicate": "containedInPlace",
"objectId": "geoId/24",
"objectName": "Maryland"
},
]
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']:
# Response for sending a request where one dcid does not exist.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'dc/MadDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': [],
'dc/MadderDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == []:
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
class TestGetPropertyLabels(unittest.TestCase):
""" Unit tests for get_property_labels. """
@mock.patch('requests.post', side_effect=post_request_mock)
def test_single_dcid(self, post_mock):
""" Calling get_property_labels with a single dcid returns a valid
result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['geoId/0649670'])
self.assertDictEqual(out_props,
{'geoId/0649670': ["containedInPlace", "name", "geoId", "typeOf"]})
# Test with out=False
in_props = dc.get_property_labels(['geoId/0649670'], out=False)
self.assertDictEqual(in_props, {'geoId/0649670': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_labels returns valid results with multiple
dcids.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['State', 'County', 'City']
expected_in = ["typeOf"]
expected_out = ["name", "provenance", "subClassOf", "typeOf", "url"]
# Test for outgoing property labels
out_props = dc.get_property_labels(dcids)
self.assertDictEqual(out_props, {
'State': expected_out,
'County': expected_out,
'City': expected_out,
})
# Test for incoming property labels
in_props = dc.get_property_labels(dcids, out=False)
self.assertDictEqual(in_props, {
'State': expected_in,
'County': expected_in,
'City': expected_in,
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_labels with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['dc/MadDcid'])
self.assertDictEqual(out_props, {'dc/MadDcid': []})
# Test for incoming property labels
in_props = dc.get_property_labels(['dc/MadDcid'], out=False)
self.assertDictEqual(in_props, {'dc/MadDcid': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_labels with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels([])
self.assertDictEqual(out_props, {})
# Test for incoming property labels
in_props = dc.get_property_labels([], out=False)
self.assertDictEqual(in_props, {})
class TestGetPropertyValues(unittest.TestCase):
""" Unit tests for get_property_values. """
# --------------------------- STANDARD UNIT TESTS ---------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_values with multiple dcids returns valid
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['geoId/06085', 'geoId/24031']
# Get the containedInPlace Towns for Santa Clara and Montgomery County.
towns = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
self.assertDictEqual(towns, {
'geoId/06085': ['geoId/0643294', 'geoId/0644112'],
'geoId/24031': ['geoId/2462850']
})
# Get the name of Santa Clara and Montgomery County.
names = dc.get_property_values(dcids, 'name')
self.assertDictEqual(names, {
'geoId/06085': ['Santa Clara County'],
'geoId/24031': ['Montgomery County']
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_values with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
bad_dcids_1 = ['geoId/06085', 'dc/MadDcid']
bad_dcids_2 = ['dc/MadDcid', 'dc/MadderDcid']
# Get entities containedInPlace of Santa Clara County and a dcid that does
# not exist.
contained_1 = dc.get_property_values(bad_dcids_1, 'containedInPlace', out=False)
self.assertDictEqual(contained_1, {
'geoId/06085': ['geoId/0644112'],
'dc/MadDcid': []
})
# Get entities containedInPlace for two dcids that do not exist.
contained_2 = dc.get_property_values(bad_dcids_2, 'containedInPlace')
self.assertDictEqual(contained_2, {
'dc/MadDcid': [],
'dc/MadderDcid': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_property(self, post_mock):
""" Calling get_property_values with a property that does not exist returns
empty results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get property values for a property that does not exist.
prop_vals = dc.get_property_values(
['geoId/06085', 'geoId/24031'], 'madProperty')
self.assertDictEqual(prop_vals, {
'geoId/06085': [],
'geoId/24031': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_values with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get property values with an empty list of dcids.
prop_vals = dc.get_property_values([], 'containedInPlace')
self.assertDictEqual(prop_vals, {})
# ---------------------------- PANDAS UNIT TESTS ----------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series(self, post_mock):
""" Calling get_property_values with a Pandas Series returns the correct
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series.
dcids = pd.Series(['geoId/06085', 'geoId/24031'])
expected = pd.Series([
['geoId/0643294', 'geoId/0644112'],
['geoId/2462850']
])
# Call get_property_values with the series as input
actual = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
assert_series_equal(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_bad_dcids(self, post_mock):
""" Calling get_property_values with a Pandas Series and dcids that does not
exist resturns an empty result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series
bad_dcids_1 = pd.Series(['geoId/06085', 'dc/MadDcid'])
bad_dcids_2 = pd.Series(['dc/MadDcid', 'dc/MadderDcid'])
expected_1 = pd.Series([['geoId/0644112'], []])
expected_2 = pd.Series([[], []])
# Call get_property_values with series as input
actual_1 = dc.get_property_values(bad_dcids_1, 'containedInPlace', out=False)
actual_2 = dc.get_property_values(bad_dcids_2, 'containedInPlace', out=False)
# Assert the results are correct
assert_series_equal(actual_1, expected_1)
assert_series_equal(actual_2, expected_2)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_bad_property(self, post_mock):
""" Calling get_property_values with a Pandas Series and a property that
does not exist returns an empty result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The input and expected series
dcids = pd.Series(['geoId/06085', 'geoId/24031'])
expected = pd.Series([[], []])
import altair as alt
import pandas as pd
import numpy as np
import sys
TRUSTTSV = sys.argv[1]
CELLNUM = sys.argv[2]
BCRTEMP = """
<div style="width: 100%; height: 700px; position: relative; clear:both;overflow: hidden; white-space:nowrap; padding-top: 10px;clear:both;">
<div style="width: 110%, position: absolute; clear:both;">
<p style="text-align: left; color: #094D92; font-size: 30px"> BCR STATS: </p>
</div>
FREQHIST
<div style="width: 50%; float: right; padding-top: 50px;">
TABLE
</div>
DotPLOT
</div>
"""
# colnames for chain 1 and 2 - for B cells, the heavy chain is chain 1, and light chain is chain 2
TRUST_columns = ["V_gene",
"D_gene",
"J_gene",
"C_gene",
"cdr3_nt",
"cdr3_aa",
"read_cnt",
"consensus_id",
"CDR3_germline_similarity",
"consensus_full_length"]
TRUSTaligned = pd.read_csv(TRUSTTSV,
delimiter='\t',
index_col='#barcode')
Bcells = TRUSTaligned[TRUSTaligned['cell_type'] == 'B']
print(CELLNUM)
CELLNUM = int(CELLNUM)
print(type(CELLNUM))
# Calculate the percentages of heavy, light, and paired chains found in the B cells
No_BCR = CELLNUM - len(Bcells.index)
L_only = len(Bcells[(Bcells['chain1'] == "*") & (Bcells['chain2'] != "*")])
H_only = len(Bcells[(Bcells['chain2'] == "*") & (Bcells['chain1'] != "*")])
paired = len(Bcells[(Bcells['chain1'] != "*") & (Bcells['chain2'] != "*")])
BCR_stats = pd.DataFrame([No_BCR, L_only, H_only, paired, CELLNUM],
index=['No BCR',
"Light Chain Only",
"Heavy Chain Only",
"Paired Chains",
"Total"],
columns=['Number of Cells'])
BCR_stats['Percent of Cells'] = (BCR_stats['Number of Cells']*100/CELLNUM)
BCR_stats['Percent of Cells'] = round(BCR_stats['Percent of Cells'],
2).astype("str")+"%"
BCRSTATSTABLE = BCR_stats.to_html()
BCRSTATSTABLE = BCRSTATSTABLE.replace(
""" border="1" """, " ").replace(
"text-align: right", "text-align: left")
# split the heavy and light chain info out of its csv form
Bcells = Bcells.join(pd.DataFrame(Bcells.chain1.str.split(",").tolist(),
columns=['H_'+x for x in TRUST_columns],
index=Bcells.index))
Bcells = Bcells.join(pd.DataFrame(Bcells.chain2.str.split(",").tolist(),
columns=['L_'+x for x in TRUST_columns],
index=Bcells.index))
Bcells = Bcells.drop(columns=['chain1',
'chain2',
'secondary_chain1',
'secondary_chain2'])
# calculate frequencies for freq histogram
lightchainaa = pd.DataFrame(Bcells.groupby(
'L_cdr3_aa').size(), columns=['freq'])
lightchainaa['chain'] = 'light'
heavychainaa = pd.DataFrame(Bcells.groupby(
'H_cdr3_aa').size(), columns=['freq'])
heavychainaa['chain'] = 'heavy'
aa_freq = pd.concat([lightchainaa, heavychainaa])
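# Hedged sketch (added for illustration, not the original continuation): the
# frequency table could populate the FREQHIST placeholder of BCRTEMP with an
# Altair histogram, for example:
#   freq_hist = alt.Chart(aa_freq.reset_index()).mark_bar().encode(
#       x=alt.X('freq:Q', bin=True, title='CDR3 clone size'),
#       y=alt.Y('count()', title='Number of clones'),
#       color='chain:N')
#   html = BCRTEMP.replace('FREQHIST', freq_hist.to_html())
#   html = html.replace('TABLE', BCRSTATSTABLE)
# The exact encodings and the DotPLOT panel used upstream are not shown here.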
import geopandas as gpd
import pandas as pd
import os
import numpy as np
import sys
import itertools
import ast
import math
from scipy import stats
def main():
'''
Assign traffic speeds to road network edges from CVTS route records.
Each route record provides: vehicle_id, edge_path, time_stamp.
'''
data_path,calc_path,output_path = load_config()['paths']['data'],load_config()['paths']['calc'],load_config()['paths']['output']
edges_in = os.path.join(output_path, 'transport cvts analysis', 'results', 'traffic_count','road_network.shp')
routes_in = os.path.join(output_path, 'transport cvts analysis', 'results', 'routes_collected','routes.csv')
edges = gpd.read_file(edges_in)
edges.columns = map(str.lower, edges.columns)
# get the right line length
edges['length'] = edges.geometry.apply(line_length)
length_attr = list(zip(edges['g_id'].values.tolist(),edges['length'].values.tolist()))
routes_df = pd.read_csv(routes_in)
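# Hedged sketch (added for illustration, not the original continuation): with
# `length_attr` giving a length per g_id, a rough per-route distance could be
# accumulated before assigning speeds back onto the edges, e.g.
#   length_lookup = dict(length_attr)
#   routes_df['edge_path'] = routes_df['edge_path'].apply(ast.literal_eval)
#   routes_df['route_length'] = routes_df['edge_path'].apply(
#       lambda path: sum(length_lookup.get(edge, 0) for edge in path))
# Column names beyond vehicle_id, edge_path and time_stamp are assumptions.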
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
# Assert unsupported axis value yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
import time
import pandas as pd
import numpy as np
import sys
CITY_DATA = {'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv'}
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = input(
'Please type in one of these cities you\'d like to investigate: chicago, new york city or washington : ').lower()
cities = ['chicago', 'new york city', 'washington']
while city not in cities:
        print('Oops. There must be something wrong. \n You can only choose between the given three cities.')
city = input('Please type in one of these cities: chicago, new york city or washington : ').lower()
# TO DO: get user input for month (all, january, february, ... , june)
month = input(
'Please type in a month you\'d like to investigate. If you\'d like to choose all, type in all : ').lower()
months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']
while month not in months:
        print('Oops. There must be something wrong.')
month = input(
'Please type in a month you\'d like to investigate. If you\'d like to choose all, type in all : ').lower()
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = input(
'Please type in a day of week you\'d like to explore. If you\'d like to choose all, type in all : ').lower()
    days = ['all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
while day not in days:
        print('Oops. There must be something wrong.')
day = input(
'Please type in a day of week you\'d like to explore. If you\'d like to choose all, type in all : ').lower()
print('-' * 40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
# load data file into a dataframe
    df = pd.read_csv(CITY_DATA[city])
from math import sqrt
import time
import os
import sys
import pandas as pd
import gurobipy as gp
from gurobipy import GRB
import numpy as np
import jax
import jax.numpy as jnp
from architect.optimization import (
AdversarialLocalOptimizer,
)
from architect.examples.satellite_stl.sat_design_problem import (
make_sat_design_problem,
)
from architect.examples.satellite_stl.sat_stl_specification import (
make_sat_rendezvous_specification,
)
def L1Norm(model, x):
    """Return a Gurobi linear expression equal to the L1 norm sum_i |x[i]|,
    built from auxiliary variables constrained via gp.abs_."""
    # From Dawei's STL planning code
xvar = model.addVars(len(x), lb=-GRB.INFINITY)
abs_x = model.addVars(len(x))
model.update()
xvar = [xvar[i] for i in range(len(xvar))]
abs_x = [abs_x[i] for i in range(len(abs_x))]
for i in range(len(x)):
model.addConstr(xvar[i] == x[i])
model.addConstr(abs_x[i] == gp.abs_(xvar[i]))
return sum(abs_x)
def main(seed: int):
# Problem parameters
t_sim = 200.0
dt = 2.0
time_steps = int(t_sim // dt)
n_dims = 6
n_controls = 3
speed_threshold = 0.1 # m/s
docking_threshold = 0.1 # m
min_waiting_radius = 2.0 # m
# Other dynamics parameters
MU = 3.986e14 # Earth's gravitational parameter (m^3 / s^2)
    A_LEO = 353e3  # LEO semi-major axis (m)
M_CHASER = 500 # chaser satellite mass
N = sqrt(MU / A_LEO ** 3) # mean-motion parameter
BIGM = 1e4 # for binary logic constraints
# A = np.array(
# [
# [1.0542066, 0.0, 0.0, 1.9879397, 0.3796246, 0.0],
# [-0.00688847, 1.0, 0.0, -0.37962466, 1.9517579, 0.0],
# [0.0, 0.0, 0.9819311, 0.0, 0.0, 1.9879396],
# [0.05404278, 0.0, 0.0, 0.98193115, 0.37847722, 0.0],
# [-0.01032022, 0.0, 0.0, -0.3784773, 0.92772454, 0.0],
# [0.0, 0.0, -0.01801426, 0.0, 0.0, 0.9819312],
# ]
# )
# B = np.array(
# [
# [0.00398793, 0.00050678, 0.0],
# [-0.00050678, 0.00395173, 0.0],
# [0.0, 0.0, 0.00398793],
# [0.00397588, 0.00075925, 0.0],
# [-0.00075925, 0.00390352, 0.0],
# [0.0, 0.0, 0.00397588],
# ]
# )
prng_key = jax.random.PRNGKey(seed)
# Make the design problem
specification_weight = 2e4
prng_key, subkey = jax.random.split(prng_key)
sat_design_problem = make_sat_design_problem(
specification_weight, time_steps, dt, mission_1=True
)
# Create a new model
m = gp.Model("mission_1")
# m.setParam(GRB.Param.OutputFlag, 0)
m.setParam(GRB.Param.MIPGap, 1)
m.setParam(GRB.Param.TimeLimit, 500)
# Create a trajectory optimization problem with states, reference states, control
# inputs, and feedback matrix
x = m.addMVar((time_steps, n_dims), lb=-100.0, ub=100.0, name="x")
x_ref = m.addMVar((time_steps, n_dims), lb=-100.0, ub=100.0, name="x_ref")
u = m.addMVar((time_steps, n_controls), lb=-1000.0, ub=1000.0, name="u")
u_ref = m.addMVar((time_steps, n_controls), lb=-1000.0, ub=1000.0, name="u_ref")
# We cannot simultaneously optimize K, so set it to be something reasonable
K = np.array(
[
[27.995287, -5.337199, -0.6868453, 72.93794, 3.8204267, 0.19549589],
[3.8033, 25.740, -2.8299, -2.809, 72.12, -0.5243],
[3.2838054, -1.0270333, 24.277672, 0.7584929, -0.85453165, 71.199554],
]
)
# Initial condition constraints
ep = sat_design_problem.exogenous_params.sample(subkey)
m.addConstr(x[0, 0] == ep[0].item(), "px_0")
m.addConstr(x[0, 1] == ep[1].item(), "py_0")
m.addConstr(x[0, 2] == ep[2].item(), "pz_0")
m.addConstr(x[0, 3] == ep[3].item(), "vx_0")
m.addConstr(x[0, 4] == ep[4].item(), "vy_0")
m.addConstr(x[0, 5] == ep[5].item(), "vz_0")
# Encode the dynamics in a simple discrete-time form
for t in range(1, time_steps):
# Control law
m.addConstr(
u[t - 1, :] == u_ref[t - 1, :] - K @ x[t - 1, :] + K @ x_ref[t - 1, :],
f"u({t})",
)
        # The linear DT dynamics are likely more accurate, but the approximate CT
        # dynamics are easier to handle. The CT dynamics find a feasible solution
        # within 500 s most of the time, while the DT dynamics do not; however, the
        # solution found using the CT dynamics doesn't really work, since the
        # approximation error is too high.
# # Linear discrete-time dynamics
# m.addConstr(x[t, :] == A @ x[t - 1, :] + B @ u[t - 1, :])
# CHW dynamics
m.addConstr(x[t, 0] == x[t - 1, 0] + dt * x[t - 1, 3], f"d(px)/dt({t})")
m.addConstr(x[t, 1] == x[t - 1, 1] + dt * x[t - 1, 4], f"d(py)/dt({t})")
m.addConstr(x[t, 2] == x[t - 1, 2] + dt * x[t - 1, 5], f"d(pz)/dt({t})")
m.addConstr(
x[t, 3]
== x[t - 1, 3]
+ dt
* (3 * N ** 2 * x[t - 1, 0] + 2 * N * x[t - 1, 4] + u[t - 1, 0] / M_CHASER),
f"d(vx)/dt({t})",
)
m.addConstr(
x[t, 4]
== x[t - 1, 4] + dt * (-2 * N * x[t - 1, 3] + u[t - 1, 1] / M_CHASER),
f"d(vy)/dt({t})",
)
m.addConstr(
x[t, 5]
== x[t - 1, 5] + dt * (-(N ** 2) * x[t - 1, 2] + u[t - 1, 2] / M_CHASER),
f"d(vz)/dt({t})",
)
# Add the "eventually reach the target" constraint.
# Start by encoding the robustness of the docking distance predicate, and then
# encode the eventually robustness at each timestep as being the max over
# remaining timesteps
r_goal = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_goal")
for t in range(time_steps):
distance_to_goal_t = L1Norm(m, [xi for xi in x[t, :3]])
m.addConstr(r_goal[t] == docking_threshold - distance_to_goal_t)
r_f_goal = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_f_goal")
m.addConstr(r_f_goal[time_steps - 1] == r_goal[time_steps - 1])
for t in reversed(range(time_steps - 1)):
m.addConstr(r_f_goal[t] == gp.max_([r_goal[t], r_f_goal[t + 1], -BIGM]))
# Require that the eventually robustness is positive at the start of the trace
m.addConstr(r_f_goal[0] >= 0)
# Now add robustness traces for being close to the target and being slow
r_close = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_close")
r_not_close = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_close")
r_slow = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_slow")
for t in range(time_steps):
distance_to_goal_t = L1Norm(m, [xi for xi in x[t, :3]])
m.addConstr(r_close[t] == min_waiting_radius - distance_to_goal_t)
m.addConstr(r_not_close[t] == -r_close[t])
speed_t = L1Norm(m, [xi for xi in x[t, 3:]])
m.addConstr(r_slow[t] == speed_threshold - speed_t)
# Make a robustness trace for "always slow"
r_g_slow = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_slow")
m.addConstr(r_g_slow[time_steps - 1] == r_slow[time_steps - 1])
for t in reversed(range(time_steps - 1)):
m.addConstr(r_g_slow[t] == gp.min_([r_slow[t], r_g_slow[t + 1], BIGM]))
# Make a robustness trace for "not close until always slow".
# This requires a trace for "always not close until this time"
r_not_close_until = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_slow")
m.addConstr(r_not_close_until[0] == r_not_close[0])
for t in range(1, time_steps):
m.addConstr(
r_not_close_until[t]
== gp.min_([r_not_close[t], r_not_close_until[t - 1], BIGM])
)
# Make a robustness trace for the until happening at each timestep
r_until_here = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_slow")
for t in range(time_steps):
m.addConstr(
r_until_here[t] == gp.min_([r_not_close_until[t], r_g_slow[t], BIGM])
)
# The until robustness is the maximum of r_until_here over the rest of the trace
r_until = m.addVars(time_steps, lb=-BIGM, ub=BIGM, name="r_until")
m.addConstr(r_until[time_steps - 1] == r_until_here[time_steps - 1])
for t in reversed(range(time_steps - 1)):
m.addConstr(r_until[t] == gp.max_([r_until_here[t], r_until[t + 1], -BIGM]))
# Require that the until robustness is positive at the start of the trace
m.addConstr(r_until[0] >= 0)
# Add an objective for the total impulse required for the manuever along with
# the robustness
impulses = [dt * L1Norm(m, [ui for ui in u[t]]) for t in range(time_steps)]
r_total = m.addVar(lb=-BIGM, ub=BIGM, name="r_total")
m.addConstr(r_total == gp.min_([r_until[0], r_f_goal[0], BIGM]))
total_effort = sum(impulses)
m.setObjective(total_effort / 2e4 - r_total, GRB.MINIMIZE)
# Solve the problem
start = time.time()
m.optimize()
end = time.time()
mip_solve_time = end - start
print("solving MIP takes %.3f s" % (mip_solve_time))
try:
# Extract the design parameters
x_ref_opt = x_ref.X
u_ref_opt = u_ref.X
planned_trajectory = np.hstack((u_ref_opt, x_ref_opt))
design_params_np = np.concatenate(
(K.reshape(-1), planned_trajectory.reshape(-1))
)
dp_opt = jnp.array(design_params_np)
# Create the optimizer
ad_opt = AdversarialLocalOptimizer(sat_design_problem)
# Run a simulation for plotting the optimal solution with the original exogenous
# parameters
ep_opt = jnp.array([11.5, 11.5, 0, 0, 0, 0])
state_trace, total_effort = sat_design_problem.simulator(dp_opt, ep_opt)
# Get the robustness of this solution
stl_specification = make_sat_rendezvous_specification()
t_range = jnp.linspace(0.0, time_steps * dt, state_trace.shape[0])
signal = jnp.vstack((t_range.reshape(1, -1), state_trace.T)) # type: ignore
original_robustness = stl_specification(signal)
original_cost = -original_robustness[1, 0] + total_effort / specification_weight
# Do the adversarial optimization
sat_design_problem.design_params.set_values(dp_opt)
sat_design_problem.exogenous_params.set_values(ep_opt)
prng_key, subkey = jax.random.split(prng_key)
(
dp_opt,
ep_opt,
cost,
cost_gap,
opt_time,
t_jit,
rounds,
pop_size,
) = ad_opt.optimize(
subkey,
disp=False,
rounds=10,
n_init=8,
stopping_tolerance=0.1,
maxiter=500,
jit=True,
)
# Run another sim to get the post-adversary robustness
state_trace, total_effort = sat_design_problem.simulator(dp_opt, ep_opt)
# Get the robustness of this solution
stl_specification = make_sat_rendezvous_specification()
t_range = jnp.linspace(0.0, time_steps * dt, state_trace.shape[0])
signal = jnp.vstack((t_range.reshape(1, -1), state_trace.T)) # type: ignore
robustness = stl_specification(signal)
return [
{"seed": seed, "measurement": "Cost", "value": cost},
{"seed": seed, "measurement": "Pre-adversary cost", "value": original_cost},
{"seed": seed, "measurement": "Cost gap", "value": cost_gap},
{
"seed": seed,
"measurement": "Optimization time (s)",
"value": mip_solve_time,
},
{"seed": seed, "measurement": "Compilation time (s)", "value": 0.0},
{"seed": seed, "measurement": "STL Robustness", "value": robustness[1, 0]},
{
"seed": seed,
"measurement": "Pre-adversary STL Robustness",
"value": original_robustness[1, 0],
},
{"seed": seed, "measurement": "Total effort (N-s)", "value": total_effort},
{"seed": seed, "measurement": "Optimization rounds", "value": rounds + 1},
{"seed": seed, "measurement": "Final population size", "value": pop_size},
{"seed": seed, "measurement": "Feasible", "value": True},
]
except gp.GurobiError:
return [
{"seed": seed, "measurement": "Feasible", "value": False},
{
"seed": seed,
"measurement": "Optimization time (s)",
"value": mip_solve_time,
},
]
# x_opt = x.X
# ax = plt.axes(projection="3d")
# ax.plot3D(x_opt[:, 0], x_opt[:, 1], x_opt[:, 2])
# ax.plot3D(0.0, 0.0, 0.0, "ko")
# ax.plot3D(x_opt[0, 0], x_opt[0, 1], x_opt[0, 2], "ks")
# plt.show()
# r_goals = []
# r_f_goals = []
# for t in range(time_steps):
# r_goals.append(r_goal[t].X)
# r_f_goals.append(r_f_goal[t].X)
# plt.plot(r_goals, label="goal")
# plt.plot(r_f_goals, label="eventually goal")
# plt.legend()
# plt.show()
# r_not_closes = []
# r_slows = []
# r_g_slows = []
# r_untils = []
# r_not_close_untils = []
# for t in range(time_steps):
# r_not_closes.append(r_not_close[t].X)
# r_slows.append(r_slow[t].X)
# r_g_slows.append(r_g_slow[t].X)
# r_not_close_untils.append(r_not_close_until[t].X)
# r_untils.append(r_until[t].X)
# plt.plot(r_not_closes, label="not_close")
# plt.plot(r_slows, label="slow")
# plt.plot(r_g_slows, label="always slow")
# plt.plot(r_not_close_untils, label="not close until t")
# plt.plot(r_untils, label="not close until always slow")
# plt.legend()
# plt.show()
if __name__ == "__main__":
    results_df = pd.DataFrame()
# <NAME>, <NAME>
# main.py
# Processes exoplanet data
import matplotlib
import sys
import pandas as pd
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
db_path = 'data/kepler.csv'
def main():
filein = open(db_path, 'r')
# create an empty db
db = []
# parse input file
for line in filein.readlines():
finalword = ''
sections = line.split('"')
# parse by splitting " sections
for i, sect in enumerate(sections):
# if current section was in a "
if i%2 == 1:
newword = ''
# change , to ;
                for c in sect:
                    if c == ',':
                        newword += ';'
                    else:
                        newword += c
finalword += newword
else:
finalword += sect
# only add to db if the columns are correct
noendline = finalword[:-1]
tokens = noendline.split(',')
if len(tokens) == 98:
db.append(tokens)
# DEBUG
print(len(db))
new_db = np.array(db)
# names of each variable (columns)
row_r1 = new_db[0, :]
col = row_r1.tolist()
# delete first row in db (just contains the headers)
newer_db=np.delete(new_db,0,0)
# create a dataframe from the columns and data
df = pd.DataFrame(data=newer_db, columns = col)
# replacing empty spaces with 0's
df["radius"]=df["radius"].replace({"": 0})
df["semi_major_axis"]=df["semi_major_axis"].replace({"":0})
df["mass"]=df["mass"].replace({"":0})
# changing the type of data from str to num
df["radius"] = | pd.to_numeric(df["radius"]) | pandas.to_numeric |
# # Customer/Card Detection techniques
#
# One of the main challenges of this competition (https://www.kaggle.com/c/ieee-fraud-detection) is **how to obtain the card or the customer to whom the transactions, fraudulent or not, belong.** It has been said that the datasets contain all the information needed to get this, but we faced two problems:
# 1. The data are **anonymized**: they were collected by Vesta’s fraud protection system and digital security partners.
# 2. The true meaning of the columns that seem to compose the card or the customer is somewhat obscure too. **The field names are masked** and no pairwise dictionary is provided, for privacy protection and contract agreement.
#
# > For this reason, the identification of either the customer or the card is one of the most discussed issues in the competition these datasets come from, and the approaches and solutions are quite different.
# > **In this notebook, we build partially on the first approach, coming from one of the winners of the competition, <NAME>, teaming with <NAME>. We will use the same analysis techniques, but how we implement the model using this information will ultimately differ.**
#
#
# ## Our approaches
#
# ### `uid1`
# We used a simple approach from the more evident columns:
# * `card1`: Probably the card number given by the issuing company, or maybe a customer number.
# * `addr1`: Billing address zip code
# * `D1achr`: `D1` could be either the days passed between the transaction and the card issue date, or between the transaction and the first transaction made with the card. For us, this does not really matter, as both can be used to identify the card. We made this attribute by subtracting `D1` (in days) from the column `TransactionDT` (converted from seconds to days) with the formula:
#
# **`D1achr` = `TransactionDT` /(60x60x24) - `D1`**
# **`uid1` = `card1` + `addr1` + `D1achr`**
#
# ---
#
# ### `uid2`
# Our second approach was to add more card and address fields to get the unique card or user id, similar to the choice made by <NAME> (https://www.kaggle.com/c/ieee-fraud-detection/discussion/111696). When doing the FE, this uid has been really helpful, and the aggregations created with this uid greatly improve the model.
#
# **`uid2` = `card1` + `card2` ... + `card6` + `addr1` + `addr2` + `D1achr` + `ProductCD`**
#
# ---
#
# ### `uid3`
#
# Apart from these approaches, we wanted to replicate the analysis done by <NAME> performing adversarial validation, which is based on mixing the data from train and test, removing the business target (`isFraud`) and transaction identification columns, and running a model to determine whether we can predict which observations come from train and which from test, assuming that the most important features are strongly related to the customer identification.
#
# https://www.kaggle.com/c/ieee-fraud-detection/discussion/111510
#
# **We will use a combination of the three approaches to create new features that identify the card, and new aggregates from them.**
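# As a minimal illustration of the `uid1`/`uid2` definitions above (an assumed helper, not the exact
# code used later in this notebook), the identifiers could be built with pandas roughly as follows;
# `txn` stands for the raw transaction DataFrame with the competition's column names.

def add_card_uids(txn):
    txn = txn.copy()
    # D1achr = transaction day (TransactionDT is in seconds) minus D1 (already in days)
    txn['D1achr'] = txn['TransactionDT'] / (60 * 60 * 24) - txn['D1']
    # uid1 = card1 + addr1 + D1achr
    txn['uid1'] = (txn['card1'].astype(str) + '_'
                   + txn['addr1'].astype(str) + '_'
                   + txn['D1achr'].astype(str))
    # uid2 = card1..card6 + addr1 + addr2 + D1achr + ProductCD
    uid2_cols = [f'card{i}' for i in range(1, 7)] + ['addr1', 'addr2', 'D1achr', 'ProductCD']
    txn['uid2'] = txn[uid2_cols].astype(str).apply('_'.join, axis=1)
    return txn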
get_ipython().system('pip -q install --upgrade pip')
get_ipython().system('pip -q install --upgrade seaborn')
import gc
import catboost
import lightgbm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
import warnings
warnings.filterwarnings("ignore")
##### Functions
def get_denominator(serie):
    """Find the smallest integer i (up to 999) such that every unique value of
    the series becomes (approximately) an integer when multiplied by i."""
    array_n = np.array(serie)
    uniques_n = np.unique(array_n[~np.isnan(array_n)])
    result = None
    for i in range(1, 1000):
        decimals = uniques_n * i - np.floor(uniques_n * i)
        # i is a valid denominator if every scaled value is within 0.001 of an integer
        if np.all((decimals < 0.001) | (decimals > 0.999)):
            result = i
            break
    print('denominator', serie.name, ':', result)
def get_fraud_weights(serie):
head = 10
values = list(serie.value_counts(dropna=False).index[:head])
    df_wg_fraud = pd.DataFrame(columns=[serie.name, 'count', 'weight fraud'])
import numpy as np
import pandas as pd
from rdt import HyperTransformer
from rdt.transformers import OneHotEncodingTransformer
def get_input_data_with_nan():
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1],
'float': [0.1, 0.2, 0.1, np.nan, 0.1],
'categorical': ['a', 'b', np.nan, 'b', 'a'],
'bool': [False, np.nan, False, True, False],
'datetime': [
np.nan, '2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01'
],
'names': ['Jon', 'Arya', 'Sansa', 'Jon', 'Robb'],
})
data['datetime'] = pd.to_datetime(data['datetime'])
return data
def get_input_data_without_nan():
data = pd.DataFrame({
'integer': [1, 2, 1, 3],
'float': [0.1, 0.2, 0.1, 0.1],
'categorical': ['a', 'b', 'b', 'a'],
'bool': [False, False, True, False],
'datetime': [
'2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01'
],
'names': ['Jon', 'Arya', 'Sansa', 'Jon'],
})
data['datetime'] = pd.to_datetime(data['datetime'])
data['bool'] = data['bool'].astype('O') # boolean transformer returns O instead of bool
return data
def get_transformed_data():
return pd.DataFrame({
'integer': [1, 2, 1, 3],
'float': [0.1, 0.2, 0.1, 0.1],
'categorical': [0.75, 0.25, 0.25, 0.75],
'bool': [0.0, 0.0, 1.0, 0.0],
'datetime': [
1.2649824e+18,
1.262304e+18,
1.2649824e+18,
1.262304e+18
],
'names': [0.25, 0.875, 0.625, 0.25]
})
def get_transformed_nan_data():
return pd.DataFrame({
'integer': [1, 2, 1, 3, 1],
'float': [0.1, 0.2, 0.1, 0.125, 0.1],
'float#1': [0.0, 0.0, 0.0, 1.0, 0.0],
'categorical': [0.6, 0.2, 0.9, 0.2, 0.6],
'bool': [0.0, -1.0, 0.0, 1.0, 0.0],
'bool#1': [0.0, 1.0, 0.0, 0.0, 0.0],
'datetime': [
1.2636432e+18, 1.2649824e+18, 1.262304e+18,
1.2649824e+18, 1.262304e+18
],
'datetime#1': [1.0, 0.0, 0.0, 0.0, 0.0],
'names': [0.2, 0.9, 0.5, 0.2, 0.7],
})
def get_transformers():
return {
'integer': {
'class': 'NumericalTransformer',
'kwargs': {
'dtype': np.int64,
}
},
'float': {
'class': 'NumericalTransformer',
'kwargs': {
'dtype': np.float64,
}
},
'categorical': {
'class': 'CategoricalTransformer'
},
'bool': {
'class': 'BooleanTransformer'
},
'datetime': {
'class': 'DatetimeTransformer'
},
'names': {
'class': 'CategoricalTransformer',
},
}
def test_hypertransformer_with_transformers():
data = get_input_data_without_nan()
transformers = get_transformers()
ht = HyperTransformer(transformers)
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_with_transformers_nan_data():
data = get_input_data_with_nan()
transformers = get_transformers()
ht = HyperTransformer(transformers)
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_nan_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_without_transformers():
data = get_input_data_without_nan()
ht = HyperTransformer()
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_without_transformers_nan_data():
data = get_input_data_with_nan()
ht = HyperTransformer()
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_nan_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_single_category():
ht = HyperTransformer(transformers={
'a': OneHotEncodingTransformer()
})
data = pd.DataFrame({
'a': ['a', 'a', 'a']
})
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
pd.testing.assert_frame_equal(data, reverse)
def test_dtype_category():
df = pd.DataFrame({'a': ['a', 'b', 'c']}, dtype='category')
ht = HyperTransformer()
ht.fit(df)
trans = ht.transform(df)
rever = ht.reverse_transform(trans)
pd.testing.assert_frame_equal(df, rever)
def test_empty_transformers():
"""If transformers is an empty dict, do nothing."""
data = get_input_data_without_nan()
ht = HyperTransformer(transformers={})
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
pd.testing.assert_frame_equal(data, transformed)
pd.testing.assert_frame_equal(data, reverse)
def test_empty_transformers_nan_data():
"""If transformers is an empty dict, do nothing."""
data = get_input_data_with_nan()
ht = HyperTransformer(transformers={})
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
pd.testing.assert_frame_equal(data, transformed)
    pd.testing.assert_frame_equal(data, reverse)
import pandas as pd
import os
import re
import pdb
from glob import glob
from arctic import CHUNK_STORE, Arctic
from arctic.date import DateRange
from datetime import datetime as dt
from datetime import date
import arrow
DEFAULT_START_TIME = arrow.get(2017,1,20)
DEFAULT_END_TIME = arrow.get(2017,2,6)
def write_wrapper(target_building, path_to_directory, schema=1):
'''
para:
target_building: the building name and used as library name
path_to_directory: the path to the directory containing data files
        schema: schema used in the csv file
***only supports csv for now with three different schemas:
1 - | timestamp(string) | data
2 - | timestamp(epoch) | data
3 - | data column only |
return a wrapped iterator for write_to_db
'''
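    # Illustrative sketch only (assumptions, not necessarily what the code below does):
    # schema 1 could be parsed with pd.to_datetime(df.iloc[:, 0]),
    # schema 2 with pd.to_datetime(df.iloc[:, 0], unit='s'),
    # and schema 3 has no timestamp column, so an index would have to be supplied separately
    # (e.g. a pd.date_range between DEFAULT_START_TIME and DEFAULT_END_TIME).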
os.chdir(path_to_directory)
files = glob('*.csv')
points = []
timestamps = []
data = []
for f in files:
try:
            df = pd.read_csv(f)
import pandas as pd
import xarray as xr
import numpy as np
import pyomo.environ as pe
from datetime import datetime
import os
from pyomo.opt import SolverStatus, TerminationCondition
class problem:
# Set up the problem
def __init__(self, folder, simulation_name):
# Path to the folder where the input files are stored
self.model_folder = folder
# Name of the simulation (used for the NetCDF output)
self.simulation_name = simulation_name
# Generation units
self.gen = pd.read_csv(folder+'/gen.csv')
# Lines
self.lin = pd.read_csv(folder+'/lin.csv')
# Demand time-series
self.dem = pd.read_csv(folder+'/dem.csv')
# Non-dispatchable renewables time-series
self.ren = pd.read_csv(folder+'/ren.csv')
# Non-dispatchable renewables capacity
self.ren_pp = pd.read_csv(folder+'/ren_pp.csv')
# Inflow time-series
        self.inflow = pd.read_csv(folder + '/inflow.csv')
import time
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from tqdm import tqdm
from course_lib.Base.BaseRecommender import BaseRecommender
from src.data_management.data_preprocessing_fm import sample_negative_interactions_uniformly
from src.utils.general_utility_functions import get_total_number_of_users, get_total_number_of_items
from sklearn.preprocessing import MinMaxScaler
def preprocess_dataframe_after_reading(df: pd.DataFrame):
df = df.copy()
df = df.sort_values(by="user_id", ascending=True)
df = df.reset_index()
df = df.drop(columns=["index"], inplace=False)
return df
def get_valid_dataframe_second_version(user_id_array, cutoff, main_recommender, path, mapper, recommender_list,
URM_train, user_factors=None, item_factors=None):
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, top_recommender=main_recommender,
exclude_seen=True, cutoff=cutoff)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_train_dataframe_proportion(user_id_array, cutoff, main_recommender, path, mapper, recommender_list,
URM_train, proportion, user_factors=None, item_factors=None,
negative_label_value=0, threshold=0.7):
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, top_recommender=main_recommender,
exclude_seen=False, cutoff=cutoff)
labels, non_zero_count, _ = get_label_array(data_frame, URM_train)
data_frame['label'] = labels
data_frame = add_random_negative_ratings(data_frame=data_frame, URM_train=URM_train, proportion=proportion,
negative_label_value=negative_label_value)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
# Add labels value in order to differentiate more the elements
mask = (data_frame[rec.RECOMMENDER_NAME] > threshold) & (data_frame['label'] > 0)
print("\t Score greater than threshold: {}/{}".format(np.sum(mask), non_zero_count))
data_frame.loc[mask, 'label'] += 1
print("Labels greater than 1: {}".format(np.sum(data_frame['label'] > 1)))
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path, add_subclass=False)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_dataframe_all_data(user_id_array, path, mapper, recommender_list,
URM_train, proportion, user_factors=None, item_factors=None):
negative_URM = sample_negative_interactions_uniformly(negative_sample_size=len(URM_train.data) * proportion,
URM=URM_train)
data_frame = get_dataframe_URM(user_id_array=user_id_array, URM_train=URM_train + negative_URM)
labels, _, _ = get_label_array(data_frame, URM_train)
data_frame['label'] = labels
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = advanced_subclass_handling(data_frame=data_frame, URM_train=URM_train, path=path, add_subclass=False)
data_frame = add_ICM_information(data_frame=data_frame, path=path, one_hot_encoding_subclass=False,
use_subclass=True)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = add_item_popularity(data_frame=data_frame, URM_train=URM_train)
if user_factors is not None:
data_frame = add_user_factors(data_frame=data_frame, user_factors=user_factors)
if item_factors is not None:
data_frame = add_item_factors(data_frame=data_frame, item_factors=item_factors)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame = data_frame.drop(columns=["index"], inplace=False)
return data_frame
def get_dataframe_first_version(user_id_array, remove_seen_flag, cutoff, main_recommender, path, mapper,
recommender_list,
URM_train):
# Get dataframe for these users
data_frame = get_boosting_base_dataframe(user_id_array=user_id_array, exclude_seen=remove_seen_flag,
cutoff=cutoff, top_recommender=main_recommender)
for rec in recommender_list:
data_frame = add_recommender_predictions(data_frame=data_frame, recommender=rec,
column_name=rec.RECOMMENDER_NAME)
data_frame = add_ICM_information(data_frame=data_frame, path=path)
data_frame = add_UCM_information(data_frame=data_frame, path=path, user_mapper=mapper)
data_frame = add_user_len_information(data_frame=data_frame, URM_train=URM_train)
data_frame = data_frame.sort_values(by="user_id", ascending=True)
data_frame = data_frame.reset_index()
data_frame.drop(columns=["index"], inplace=False)
return data_frame
def add_user_factors(data_frame: pd.DataFrame, user_factors: np.ndarray):
"""
Add user factors to the dataframe
:param data_frame:
:param user_factors:
:return:
"""
print("Adding user factors...")
data_frame = data_frame.copy()
user_factors_df = pd.DataFrame(data=user_factors,
index=np.arange(0, user_factors.shape[0]),
columns=["user_factor_{}".format(i + 1) for i in range(user_factors.shape[1])])
data_frame = pd.merge(data_frame, user_factors_df, left_on="user_id", right_index=True)
return data_frame
def add_item_factors(data_frame: pd.DataFrame, item_factors: np.ndarray):
"""
Add item factors to the dataframe
:param data_frame:
:param item_factors:
:return:
"""
print("Adding item factors...")
data_frame = data_frame.copy()
item_factors_df = pd.DataFrame(data=item_factors,
index=np.arange(0, item_factors.shape[0]),
columns=["item_factor_{}".format(i + 1) for i in range(item_factors.shape[1])])
data_frame = pd.merge(data_frame, item_factors_df, left_on="item_id", right_index=True)
return data_frame
def add_item_popularity(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
Add the item popularity to the dataframe
:param data_frame: data frame containing information for boosting
:param URM_train: URM train matrix
:return: dataframe containing boosting information + item popularity
"""
print("Adding item popularity...")
data_frame = data_frame.copy()
pop_items = (URM_train > 0).sum(axis=0)
pop_items = np.array(pop_items).squeeze()
item_ids = np.arange(URM_train.shape[1])
data = np.array([item_ids, pop_items])
data = np.transpose(data)
new_df = pd.DataFrame(data=data, columns=["row", "item_pop"])
data_frame = pd.merge(data_frame, new_df, left_on="item_id", right_on="row")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def get_label_array(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
    Create a label array with the correct predictions
:param data_frame: data frame containing information for boosting
:param URM_train: URM train matrix
:return: numpy array containing y information
"""
print("Retrieving training labels...")
user_ids = data_frame['user_id'].values
item_ids = data_frame['item_id'].values
y = np.zeros(user_ids.size, dtype=np.int)
labels = np.array(URM_train[user_ids, item_ids].tolist()).flatten()
y[labels > 0] = 1
non_zero_count = np.count_nonzero(y)
print("\t- There are {} non-zero ratings in {}".format(non_zero_count, y.size))
return y, non_zero_count, y.size
def add_user_len_information(data_frame: pd.DataFrame, URM_train: csr_matrix):
"""
Add information concerning the user profile length to the row of the dataframe
:param data_frame: data frame that is being pre-processed from boosting
:param URM_train: URM train from which to take profile length information
:return: data frame with new content inserted
"""
print("Adding user profile length...")
data_frame = data_frame.copy()
user_act = (URM_train > 0).sum(axis=1)
user_act = np.array(user_act).squeeze()
user_ids = np.arange(URM_train.shape[0])
data = np.array([user_ids, user_act])
data = np.transpose(data)
new_df = pd.DataFrame(data=data, columns=["row", "user_act"])
data_frame = pd.merge(data_frame, new_df, left_on="user_id", right_on="row")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def remap_data_frame(df: pd.DataFrame, mapper):
"""
Change user_id columns of the df given in input, according to the mapper.
Users that are not present will be removed, and the others will be mapped to the correct number.
:param df: dataframe that will be modified
:param mapper: mapper according to which the dataframe will be modified
:return: dataframe with "user_id" column modified properly
"""
df = df.copy()
# Remove users that are not present in the mapper
original_users = df['row'].values
new_users_key = list(mapper.keys())
new_users_key = list(map(int, new_users_key))
new_users_key = np.array(new_users_key)
mask = np.in1d(original_users, new_users_key, invert=True)
remove = original_users[mask]
df = df.set_index("row")
mask = np.in1d(df.index, remove)
df = df.drop(df.index[mask])
# Map the index to the new one
df = df.reset_index()
df['row'] = df['row'].map(lambda x: mapper[str(x)])
return df
def add_UCM_information(data_frame: pd.DataFrame, user_mapper, path="../../data/", use_region=True, use_age=True,
use_age_onehot=False):
"""
Add UCM information to the data frame for XGboost
:param data_frame: data frame containing information being pre-processed for boosting
:param user_mapper: mapper original users to train users
:param path: where to read UCM csv files
:param use_region: True is region information should be used, false otherwise
:param use_age: True if age information should be used, false otherwise
:param use_age_onehot: True if age information added is one hot, false otherwise
:return: pd.DataFrame containing the original data frame+ UCM information
"""
print("Adding UCM information...")
t_users = get_total_number_of_users() # Total number of users (-1 since indexing from 0)
data_frame = data_frame.copy()
df_region: pd.DataFrame = pd.read_csv(path + "data_UCM_region.csv")
df_age: pd.DataFrame = pd.read_csv(path + "data_UCM_age.csv")
# Re-map UCM data frame in order to have the correct user information
if use_region:
df_region = df_region[['row', 'col']]
df_dummies = pd.get_dummies(df_region['col'], prefix='region')
df_dummies = df_dummies.join(df_region['row'])
df_dummies = df_dummies.groupby(['row'], as_index=False).sum()
# Fill missing values
user_present = df_dummies['row'].values
total_users = np.arange(t_users)
mask = np.in1d(total_users, user_present, invert=True)
missing_users = total_users[mask]
num_col = df_dummies.columns.size
imputed_users = np.zeros(shape=(num_col, missing_users.size))
imputed_users[0] = missing_users
missing_df = pd.DataFrame(data=np.transpose(imputed_users), dtype=np.int32, columns=df_dummies.columns)
df_region_onehot = df_dummies.append(missing_df, sort=False)
if user_mapper is not None:
df_region_onehot = remap_data_frame(df=df_region_onehot, mapper=user_mapper)
data_frame = pd.merge(data_frame, df_region_onehot, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
if use_age:
df_age = df_age[['row', 'col']]
# Handle missing values: fill with mode + 1
users_present = df_age['row'].values
total_users = np.arange(t_users)
mask = np.in1d(total_users, users_present, invert=True)
missing_users = total_users[mask].astype(np.int32)
missing_val_filled = np.ones(missing_users.size) * (int(df_age['col'].mode()) + 1)
missing = np.array([missing_users, missing_val_filled], dtype=np.int32)
missing_df = pd.DataFrame(data=np.transpose(missing), columns=["row", "col"])
df_age_imputed = df_age.copy().append(missing_df, sort=False)
df_age_imputed = df_age_imputed.reset_index()
df_age_imputed = df_age_imputed[['row', 'col']]
if user_mapper is not None:
df_age_imputed = remap_data_frame(df=df_age_imputed, mapper=user_mapper)
df_age_imputed = df_age_imputed.rename(columns={"col": "age"})
if use_age_onehot:
row = df_age_imputed['row']
df_age_imputed = pd.get_dummies(df_age_imputed['age'], prefix='age')
df_age_imputed = df_age_imputed.join(row)
data_frame = pd.merge(data_frame, df_age_imputed, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
# Add dummy variables indicating that the region has been imputed
df_age_dummy_imputation = df_age.copy()
df_age_dummy_imputation['col'] = 0
imputed_df = pd.DataFrame(
data={"row": missing_users, "col": np.ones(shape=missing_users.size, dtype=np.int)})
df_age_dummy_imputation = df_age_dummy_imputation.append(imputed_df, sort=False)
df_age_dummy_imputation = df_age_dummy_imputation.rename(columns={"col": "age_imputed_flag"})
if user_mapper is not None:
df_age_dummy_imputation = remap_data_frame(df=df_age_dummy_imputation, mapper=user_mapper)
data_frame = pd.merge(data_frame, df_age_dummy_imputation, right_on="row", left_on="user_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
return data_frame
def advanced_subclass_handling(data_frame: pd.DataFrame, URM_train: csr_matrix, path="../../data/",
add_subclass=False):
"""
Here we want to include in the training set sub class information in the following way:
- A column encoding the mean of 'label' for a certain couple (user, subclass): i.e. how many
items of that subclass the user liked
- Including information about the popularity of the subclass (how many items for that subclass
- Including ratings of that subclass
:param URM_train: mean response will be retrieved from here
:param data_frame: dataframe being pre-processed for boosting
:param path: path to the folder containing subclass dataframe
:return: dataframe with augmented information
"""
print("Adding subclass and feature engineering subclass...")
data_frame = data_frame.copy()
df_subclass: pd.DataFrame = pd.read_csv(path + "data_ICM_sub_class.csv")
df_subclass = df_subclass[['row', 'col']]
df_subclass = df_subclass.rename(columns={"col": "subclass"})
# Merging sub class information
data_frame = pd.merge(data_frame, df_subclass, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
print("\t- Add items present for each subclass")
# Add subclass item-popularity: how many items are present of that subclass
subclass_item_count = df_subclass.groupby("subclass").count()
data_frame = pd.merge(data_frame, subclass_item_count, right_index=True, left_on="subclass")
data_frame = data_frame.rename(columns={"row": "item_per_subclass"})
print("\t- Add ratings popularity for each subclass")
# Add subclass ratings-popularity: how many interactions we have for each subclass
URM_train_csc = URM_train.tocsc()
n_ratings_sub = []
sorted_sub_indices = np.argsort(df_subclass['subclass'].values)
sorted_sub = df_subclass['subclass'][sorted_sub_indices].values
sorted_item_subclass = df_subclass['row'][sorted_sub_indices].values
unique_sorted_sub, sub_indptr = np.unique(sorted_sub, return_index=True)
sub_indptr = np.concatenate([sub_indptr, [sorted_sub.size]])
for i, sub in tqdm(enumerate(unique_sorted_sub), total=unique_sorted_sub.size, desc="\t\tProcessing"):
item_sub = sorted_item_subclass[sub_indptr[i]: sub_indptr[i + 1]]
n_ratings_sub.append(URM_train_csc[:, item_sub].data.size)
ratings_sub = np.array([unique_sorted_sub, n_ratings_sub])
ratings_per_sub_df = pd.DataFrame(data=np.transpose(ratings_sub),
columns=["subclass", "global_ratings_per_subclass"])
data_frame = pd.merge(data_frame, ratings_per_sub_df, left_on="subclass", right_on="subclass")
# Add subclass ratings-popularity for each user using rating percentage
print("\t- Add ratings popularity for pairs (user, subclass)")
users = data_frame['user_id'].values
sub = data_frame['subclass'].values
perc_array = np.zeros(users.size)
rat_array = np.zeros(users.size)
for i, user in tqdm(enumerate(users), total=users.size, desc="\t\tProcessing"):
curr_sub = sub[i]
curr_sub_index = np.searchsorted(unique_sorted_sub, curr_sub)
# Find items of this subclass
item_sub = sorted_item_subclass[sub_indptr[curr_sub_index]: sub_indptr[curr_sub_index + 1]]
user_item = URM_train.indices[URM_train.indptr[user]: URM_train.indptr[user + 1]]
total_user_likes = user_item.size
mask = np.in1d(item_sub, user_item)
likes_per_sub = item_sub[mask].size
user_p = likes_per_sub / total_user_likes
perc_array[i] = user_p
rat_array[i] = likes_per_sub
data_frame["subclass_user_like_perc"] = perc_array
data_frame["subclass_user_like_quantity"] = rat_array
if not add_subclass:
data_frame = data_frame.drop(columns=["subclass"], inplace=False)
return data_frame
def add_ICM_information(data_frame: pd.DataFrame, path="../../data/", use_price=True, use_asset=True,
use_subclass=True, one_hot_encoding_subclass=False):
"""
Add information form the ICM files to the data frame
:param one_hot_encoding_subclass: if one hot encoding should be applied to subclass or not
:param data_frame: data frame that is being pre-processed for boosting
:param path: path to the folder containing the csv files
:param use_price: True if you wish to append price information, false otherwise
:param use_asset: True if you wish to append asset information, false otherwise
:param use_subclass: True if you wish to append subclass information, false otherwise
:return: pd.DataFrame containing the information
"""
print("Adding ICM information...")
data_frame = data_frame.copy()
df_price: pd.DataFrame = pd.read_csv(path + "data_ICM_price.csv")
df_asset: pd.DataFrame = pd.read_csv(path + "data_ICM_asset.csv")
df_subclass: pd.DataFrame = pd.read_csv(path + "data_ICM_sub_class.csv")
total_items = get_total_number_of_items()
total_items = np.arange(total_items)
if use_price:
# Handle missing values
item_present = df_price['row'].values
mask = np.in1d(total_items, item_present, invert=True)
missing_items = total_items[mask].astype(np.int32)
missing_val_filled = np.ones(missing_items.size) * df_price['data'].median()
missing = np.array([missing_items, missing_val_filled])
missing_df = pd.DataFrame(data=np.transpose(missing), columns=['row', 'data'])
df_price = df_price.append(missing_df, sort=False)
df_price = df_price.reset_index()
df_price = df_price[['row', 'data']]
# TODO remove outliers and add dummy variable
df_price = df_price.rename(columns={"data": "price"})
data_frame = pd.merge(data_frame, df_price, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=['row'], inplace=False)
if use_asset:
# Handle missing values
item_present = df_asset['row'].values
mask = np.in1d(total_items, item_present, invert=True)
missing_items = total_items[mask].astype(np.int32)
missing_val_filled = np.ones(missing_items.size) * df_asset['data'].median()
missing = np.array([missing_items, missing_val_filled])
missing_df = pd.DataFrame(data=np.transpose(missing), columns=['row', 'data'])
df_asset = df_asset.append(missing_df, sort=False)
df_asset = df_asset.reset_index()
df_asset = df_asset[['row', 'data']]
# TODO remove outliers and add dummy variable
df_asset = df_asset.rename(columns={"data": "asset"})
data_frame = pd.merge(data_frame, df_asset, right_on="row", left_on="item_id")
data_frame = data_frame.drop(columns=["row"], inplace=False)
if use_subclass:
df_subclass = df_subclass[['row', 'col']]
df_subclass = df_subclass.rename(columns={"col": "subclass"})
if not one_hot_encoding_subclass:
data_frame = pd.merge(data_frame, df_subclass, right_on="row", left_on="item_id")
else:
            dummies = pd.get_dummies(df_subclass['subclass'])
import copy
from functools import lru_cache
from os import path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ray import tune
from scipy import stats
from tune_tf2.defaults import EXPLOIT_CSV, HPS_CSV, PBT_CSV
@lru_cache(maxsize=10)
def load_tune_df(experiment_dir):
"""Cache the creation of this dataframe. """
return tune.Analysis(experiment_dir).dataframe()
def read_pbt_fitlog(pbt_dir, metrics_to_load=None, reconstruct=False):
"""Function for loading entire LF2 PBT fitlog.
This function loads an entire PBT fitlog into a pandas DataFrame.
Generations, trials, and epochs are labeled in their respective
columns of the DataFrame. The DataFrame.pivot function is particularly
useful for reshaping this data into whatever format is most useful.
Parameters
----------
pbt_dir : str
The path to the PBT run.
metrics_to_load : list of str, optional
The metrics to load into the DataFrame, by default None loads
all of the available metrics.
reconstruct : bool, optional
Whether to reconstruct the actual paths of the individual models
by duplicating paths of exploited models and removing the paths
of killed models, by default False
Returns
pd.DataFrame
A DataFrame containing the metrics of interest.
Examples
--------
To load the PBT fitlog
>>> fit_df = read_pbt_fitlog(pbt_dir)
Pivot to show `val_nll_heldin` across epochs
>>> plot_df = fit_df.pivot(
index='epoch',
columns='trial_id',
values='val_nll_heldin')
"""
if metrics_to_load is not None and "epoch" not in metrics_to_load:
metrics_to_load += ["epoch"]
tune_df = load_tune_df(pbt_dir)
fit_dfs = []
# Collect all of the LF2 data in `fit_dfs`
for trial_id, logdir in zip(tune_df.trial_id, tune_df.logdir):
if "best_model" in logdir:
continue
train_data_path = path.join(logdir, "model_dir", "train_data.csv")
fit_df = pd.read_csv(train_data_path, usecols=metrics_to_load)
fit_df["trial_id"] = trial_id
fit_dfs.append(fit_df)
pbt_df = pd.concat(fit_dfs, ignore_index=True)
# Get the generation info
pbt_state_path = path.join(pbt_dir, PBT_CSV)
pbt_state_df = pd.read_csv(pbt_state_path, usecols=["generation", "epoch"])
# add generation information to the dataframe
prev_end = 0
pbt_df["generation"] = -1
for index, row in pbt_state_df.iterrows():
gen, end_epoch = row.generation, row.epoch
generation_rows = (pbt_df["epoch"] > prev_end) & (pbt_df["epoch"] <= end_epoch)
pbt_df.loc[generation_rows, "generation"] = gen
prev_end = end_epoch
if reconstruct:
return reconstruct_history(pbt_dir, pbt_df)
else:
return pbt_df
def plot_pbt_log(pbt_dir, plot_field, reconstruct=False, save_dir=None, **kwargs):
"""Plots a specific field of the PBT fitlog.
This function uses `load_pbt_fitlog` to load the training
log and then overlays traces from all of the workers.
It uses robust statistics to calculate the appropriate window
for easy viewing.
Parameters
----------
pbt_dir : str
The path to the PBT run.
plot_field : str
The metric to plot. See the fitlog headers or lfads_tf2
source code for options.
reconstruct : bool, optional
Whether to reconstruct the actual paths of the individual models
by duplicating paths of exploited models and removing the paths
of killed models, by default False
save_dir : str, optional
The directory for saving the figure, by default None will
show an interactive plot
kwargs: optional
Any keyword arguments to be passed to pandas.DataFrame.plot
"""
# limit the metrics loaded from the CSV
fit_df = read_pbt_fitlog(pbt_dir, [plot_field], reconstruct=reconstruct)
plot_df = fit_df.pivot(index="epoch", columns="trial_id", values=plot_field)
# compute the window
epoch_range = plot_df.index.min(), plot_df.index.max()
iqr = stats.iqr(plot_df, nan_policy="omit")
median = np.nanmedian(plot_df)
field_range = np.nanmin(plot_df), median + 5 * iqr
plot_kwargs = dict(
xlim=epoch_range,
ylim=field_range,
legend=False,
alpha=0.3,
)
plot_kwargs.update(kwargs)
# make the plot
plot_df.plot(**plot_kwargs)
plt.xlabel("epoch")
plt.ylabel(plot_field)
# save the plot if necessary
if save_dir is not None:
filename = plot_field.replace(".", "_").lower()
fig_path = path.join(save_dir, f"{filename}.png")
plt.savefig(fig_path, bbox_inches="tight")
plt.close()
def read_pbt_hps(pbt_dir, reconstruct=False):
"""Function for loading the HPs used in a PBT run.
This function loads the HPs used during a PBT run into a
pandas DataFrame. Generations and trials are labeled in
their respective columns of the DataFrame. The DataFrame.pivot
function is particularly useful for reshaping this data into
whatever format is most useful.
Parameters
----------
pbt_dir : str
The path to the PBT run.
reconstruct : bool, optional
Whether to reconstruct the actual paths of the individual models
by duplicating paths of exploited models and removing the paths
of killed models, by default False
Returns
-------
pd.DataFrame
A DataFrame containing the HPs of interest.
Examples
--------
To load the PBT HPs
>>> hps_df = read_pbt_hps(pbt_dir)
Pivot to show `TRAIN.KL.CO_WEIGHT` across generations
>>> plot_df = hps_df.pivot(
index='generation',
columns='trial_id',
values='TRAIN.KL.CO_WEIGHT')
"""
hps_path = path.join(pbt_dir, HPS_CSV)
hps_df = pd.read_csv(hps_path)
if reconstruct:
return reconstruct_history(pbt_dir, hps_df)
else:
return hps_df
def plot_pbt_hps(pbt_dir, plot_field, reconstruct=False, save_dir=None, **kwargs):
"""Plots an HP for all models over the course of PBT.
This function generates a plot to visualize how an HP
changes over the course of PBT.
Parameters
----------
pbt_dir : str
The path to the PBT run.
plot_field : str
The HP to plot. See the HP log headers or lfads_tf2
source code for options.
reconstruct : bool, optional
Whether to reconstruct the actual paths of the individual
models by duplicating paths of exploited models and
removing the paths of killed models, by default False
color : str, optional
The color to use for the HP plot, passed to
matplotlib, by default 'b'
alpha : float, optional
The transparency for the HP plot traces, passed to
matplotlib, by default 0.2
save_dir : str, optional
The directory for saving the figure, by default None will
show an interactive plot
kwargs: optional
Any keyword arguments to be passed to pandas.DataFrame.plot
"""
hps_df = read_pbt_hps(pbt_dir, reconstruct=reconstruct)
plot_df = hps_df.pivot(index="generation", columns="trial_id", values=plot_field)
gen_range = plot_df.index.min(), plot_df.index.max()
field_range = plot_df.min().min(), plot_df.max().max()
plot_kwargs = dict(
drawstyle="steps-post",
legend=False,
logy=True,
c="b",
alpha=0.2,
title=f"{plot_field} for PBT run at {pbt_dir}",
xlim=gen_range,
ylim=field_range,
figsize=(10, 5),
)
plot_kwargs.update(kwargs)
plot_df.plot(**plot_kwargs)
if save_dir is None:
plt.show()
else:
filename = plot_field.replace(".", "_").lower()
fig_path = path.join(save_dir, f"{filename}.png")
plt.savefig(fig_path, bbox_inches="tight")
plt.close()
def reconstruct_history(pbt_dir, pbt_df):
"""Reconstructs actual model trajectories from fitlog.
This function pieces together the successful models
to determine what history looked like for the final
PBT models. Note that this function works on both
the PBT fitlog and HP DataFrames.
Parameters
----------
pbt_dir : str
The path to the PBT run.
pbt_df : pd.DataFrame
A DataFrame containing the metrics or HPs of interest.
Returns
------
pd.DataFrame
A DataFrame containing the reconstructed metrics
or HPs of interest.
"""
exploit_path = path.join(pbt_dir, EXPLOIT_CSV)
exploit_df = pd.read_csv(exploit_path)
exploits = exploit_df.pivot(
index="generation", columns="old_trial", values="new_trial"
)
reconstruction = {i: [i] for i in range(len(exploits.columns))}
for gen in range(1, len(exploits)):
prev_reconst = copy.deepcopy(reconstruction)
for exploited, exploiter in exploits.loc[gen].iteritems():
            if not pd.isna(exploiter):
from pathlib import Path
from typing import Dict
import numpy as np
import pandas as pd
def read_files(folder) -> Dict[str, pd.DataFrame]:
"""Read in all data files. The time will be the index if present.
Parameters
----------
folder : str or pathlike
The folder where the data files are located
Returns
-------
Dict[str, pd.DataFrame]
The different data files as DataFrames in a dict
"""
folder = Path(folder)
filenames = ('efield.t', 'expec.t', 'npop.t', 'nstate_i.t', 'table.dat')
nstate = np.loadtxt(folder / filenames[3], skiprows=1)
# convert to complex
nstate_complex = nstate[:, 1::2].astype('complex') + nstate[:, 2::2] * 1j
nstate_df = pd.DataFrame(nstate_complex)
    nstate_df = nstate_df.set_index(nstate[:, 0])
nstate_df.index.name = 'time'
efield = pd.read_csv(folder / filenames[0], delim_whitespace=True).set_index('time')
expec = pd.read_csv(folder / filenames[1], delim_whitespace=True).set_index('time')
    npop = pd.read_csv(folder / filenames[2], delim_whitespace=True)
import dash
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_core_components as dcc
#import dash_auth
import plotly.graph_objs as go
import pandas as pd
import dash_table
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from newsplease import NewsPlease
import pickle
gs = pd.read_csv('data/gold_standard.csv')
policy_df = pd.read_csv('data/policy_df.csv')
def load_obj(month, idx):
'''Used to load the pickle article using
month and idx parameters of gold standard row'''
month = str(month).zfill(2)
idx = str(idx).zfill(5)
with open("data/texts/{}/{}.pkl".format(month, idx), "rb") as f:
return pickle.load(f)
gs_articles = {}
for i in range(len(gs)):
article = load_obj(gs['month'][i], gs['ids'][i])
gs_articles[i] = article
app = dash.Dash()
app.layout = html.Div(children=[html.H3('Gold Standard Data'),
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in gs.columns],
data=gs.to_dict('records'),
row_selectable="single",
selected_rows = [],
fixed_rows={ 'headers': True, 'data': 0 },
style_table={'maxHeight': '300px','overflowX': 'scroll'},
style_cell={
'textAlign': 'left',
'height': 'auto',
'minWidth': '0px', 'maxWidth': '180px',
'whiteSpace': 'normal'
}
), html.Br(), html.Div(id='article-div'),
html.H3("Policy recommendations based on consine similarity are :"),
html.Div(id='recomend-div')
], className='container')
@app.callback(
Output('article-div', "children"),
[Input('table', "selected_rows")])
def update_article(selected_rows):
#print(f"{gs_articles[0]}")
if len(selected_rows):
return f"selected row {selected_rows}, article is {gs_articles[selected_rows[0]].text}"
    else:
        return "Please select one of the rows above to get the policy recommendation"
@app.callback(
Output('recomend-div','children'),
[Input('table', "selected_rows")])
def recommend_policies(selected_rows):
if len(selected_rows):
article = gs_articles[selected_rows[0]]
policies_recommended = []
# policy_text_arrays = policy_df['policy_text'].values
policy_text_arrays = policy_df['policy_text'].values
article_index = len(policy_text_arrays)
policy_text_arrays = np.append(policy_text_arrays, article.text)
tfidf = TfidfVectorizer(ngram_range=(1,3), stop_words='english')
tfidf_matrix = tfidf.fit_transform(policy_text_arrays)
cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
cosine_values_series = | pd.Series(cosine_similarities[article_index]) | pandas.Series |
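# --- Standalone sketch of the TF-IDF / cosine-similarity ranking used in the callback
# --- above (illustrative only: the toy corpus and the top-2 cut-off are assumptions) ---
def _rank_policies_demo():
    corpus = ["tax policy on imports",
              "health policy for hospitals",
              "news article about import tariffs"]
    matrix = TfidfVectorizer(ngram_range=(1, 3), stop_words='english').fit_transform(corpus)
    article_vec, policy_vecs = matrix[len(corpus) - 1], matrix[:len(corpus) - 1]
    scores = linear_kernel(article_vec, policy_vecs).ravel()  # cosine similarity on L2-normalised tf-idf
    best = np.argsort(scores)[::-1][:2]                       # indices of the two most similar policies
    return best, scores[best]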
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_no_items_raises(self):
with pytest.raises(ValueError, match="No objects to concatenate"):
concat([])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = | tm.makeTimeSeries() | pandas._testing.makeTimeSeries |
import os
import numpy as np
import soundfile
import librosa
from sklearn import metrics
import logging
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
import sed_eval
import torch
from torch.autograd import Variable
import vad
from vad import activity_detection
import config
def create_folder(fd):
if not os.path.exists(fd):
os.makedirs(fd)
def get_filename(path):
path = os.path.realpath(path)
name_ext = path.split('/')[-1]
name = os.path.splitext(name_ext)[0]
return name
def create_logging(log_dir, filemode):
create_folder(log_dir)
i1 = 0
while os.path.isfile(os.path.join(log_dir, "%04d.log" % i1)):
i1 += 1
log_path = os.path.join(log_dir, "%04d.log" % i1)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=log_path,
filemode=filemode)
# Print to console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logging
def read_audio(path, target_fs=None):
(audio, fs) = soundfile.read(path)
if audio.ndim > 1:
audio = np.mean(audio, axis=1)
if target_fs is not None and fs != target_fs:
audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)
fs = target_fs
return audio, fs
def calculate_scalar(x):
if x.ndim == 2:
axis = 0
elif x.ndim == 3:
axis = (0, 1)
mean = np.mean(x, axis=axis)
std = np.std(x, axis=axis)
return mean, std
def scale(x, mean, std):
return (x - mean) / std
def inverse_scale(x, mean, std):
return x * std + mean
def pad_or_trunc(x, max_len):
if len(x) == max_len:
return x
elif len(x) > max_len:
return x[0 : max_len]
else:
(seq_len, freq_bins) = x.shape
pad = np.zeros((max_len - seq_len, freq_bins))
return np.concatenate((x, pad), axis=0)
def calculate_auc(target, predict, average='macro'):
    return metrics.roc_auc_score(target, predict, average=average)
def calculate_ap(target, predict, average='macro'):
    return metrics.average_precision_score(target, predict, average=average)
def calculate_f1_score(target, predict, average='macro'):
return metrics.f1_score(target, predict, average=average)
def read_strong_meta(strong_meta):
"""Read list of events from strong meta.
Args:
strong_meta: str, path of strong meta
Returns:
events_list: list of events
"""
df = | pd.read_csv(strong_meta, sep='\t') | pandas.read_csv |
import os
import shutil
import sys
import glob
from pathlib import Path
import pandas as pd
# -----------------
# STEP 0: variables
# -----------------
root_dir = '/exports/fsw/Bendlab/SamenUniek'
raw_sessions = ['MCC_ses01-lab']
bids_sessions = ['ses-w01lab']
file_type = ['3DT1', 'SNAT1', 'SNAT2', 'SNAT3', 'PCG1', 'PCG2', 'PCG3', 'rsfMRI', 'hires', 'B0-map_RS', 'B0-map', 'B0-map', 'B0-map', 'jones30_A', 'jones30_P']
new_file_type = ['T1mri', 'bold_SNAT1', 'bold_SNAT2', 'bold_SNAT3', 'bold_PCG1', 'bold_PCG2', 'bold_PCG3', 'bold_rsfmr', 'T2str', 'B0RS', 'Bzero1', 'Bzero2', 'Bzero3', 'DTIap', 'DTIpa', 'unknown_type', 'log']
cols = ['participant','nr_files'] + new_file_type
prefix = 'sub-mcc'
conversion_log_dir = os.path.join(root_dir, 'conversion_logs')
# Create top-level pseudobids directory
pseudobids_dir = os.path.join(root_dir, 'pseudobids')
if not os.path.exists(pseudobids_dir):
os.mkdir(pseudobids_dir)
# --------------------------------
# STEP 1: Loop through sessions, participants:
# - rename PAR and REC files (in place)
# - copy participant files to new pseudobids directory structure
# --------------------------------
for i, session in enumerate(raw_sessions):
raw_data_dir = os.path.join(root_dir, session)
print(raw_data_dir)
# Log file
conversion_log_fn = os.path.join(conversion_log_dir, session + '_conversion_log.csv')
# If the log file already exists, read contents into dataframe. If not, create dataframe.
if os.path.isfile(conversion_log_fn):
df = | pd.read_csv(conversion_log_fn) | pandas.read_csv |
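    # --- Hedged sketch (assumption, not the original code): per the comment above, a new
    # --- log would be created when none exists yet, presumably with the columns in `cols`.
    else:
        df = pd.DataFrame(columns=cols)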
# Preprocessing
import os, matplotlib
if 'DISPLAY' not in os.environ:
matplotlib.use('Pdf')
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_rows', 50)
import numpy as np
import xgboost as xgb
import xgbfir
import pdb
import time
np.random.seed(1337)
def client_anaylsis():
"""
    The idea here is to unify the client IDs of several different customers into broader categories.
"""
# clean duplicate spaces in client names
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
client_df["NombreCliente"] = client_df["NombreCliente"].str.lower()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
special_list = ["^(yepas)\s.*", "^(oxxo)\s.*", "^(bodega\scomercial)\s.*", "^(bodega\saurrera)\s.*", "^(bodega)\s.*",
"^(woolwort|woolworth)\s.*", "^(zona\sexpress)\s.*",
"^(zacatecana)\s.*", "^(yza)\s.*",
"^(yanet)\s.*", "^(yak)\s.*",
"^(wings)\s.*", "^(wendy)\s.*", "^(walmart\ssuper)\s?.*", "^(waldos)\s.*",
"^(wal\smart)\s.*", "^(vulcanizadora)\s.*", "^(viveres\sy\sservicios)\s.*",
"^(vips)\s.*", "^(vinos\sy\slicores)\s.*", "^(tienda\ssuper\sprecio)\s.*",
"^(vinos\sy\sabarrotes)\s.*", "^(vinateria)\s.*", "^(video\sjuegos)\s.*", "^(universidad)\s.*",
"^(tiendas\stres\sb)\s.*", "^(toks)\s.*","^(tkt\ssix)\s.*",
"^(torteria)\s.*", "^(tortas)\s.*", "^(super\sbara)\s.*",
"^(tiendas\sde\ssuper\sprecio)\s.*", "^(ultramarinos)\s.*", "^(tortilleria)\s.*",
"^(tienda\sde\sservicio)\s.*", "^(super\sx)\s.*", "^(super\swillys)\s.*",
"^(super\ssanchez)\s.*", "^(super\sneto)\s.*", "^(super\skompras)\s.*",
"^(super\skiosco)\s.*", "^(super\sfarmacia)\s.*", "^(super\scarnes)\s.*",
"^(super\scarniceria)\s.*", "^(soriana)\s.*", "^(super\scenter)\s.*",
"^(solo\sun\sprecio)\s.*", "^(super\scity)\s.*", "^(super\sg)\s.*", "^(super\smercado)\s.*",
"^(sdn)\s.*", "^(sams\sclub)\s.*", "^(papeleria)\s.*", "^(multicinemas)\s.*",
"^(mz)\s.*", "^(motel)\s.*", "^(minisuper)\s.*", "^(mini\stienda)\s.*",
"^(mini\ssuper)\s.*", "^(mini\smarket)\s.*", "^(mini\sabarrotes)\s.*", "^(mi\sbodega)\s.*",
"^(merza|merzapack)\s.*", "^(mercado\ssoriana)\s.*", "^(mega\scomercial)\s.*",
"^(mc\sdonalds)\s.*", "^(mb)\s[^ex].*", "^(maquina\sfma)\s.*", "^(ley\sexpress)\s.*",
"^(lavamatica)\s.*", "^(kiosko)\s.*", "^(kesos\sy\skosas)\s.*", "^(issste)\s.*",
"^(hot\sdogs\sy\shamburguesas|)\s.*", "^(hamburguesas\sy\shot\sdogs)\s.*", "(hot\sdog)",
"^(hospital)\s.*", "^(hiper\ssoriana)\s.*", "^(super\sahorros)\s.*", "^(super\sabarrotes)\s.*",
"^(hambuerguesas|hamburguesas|hamburgesas)\s.*", "^(gran\sbodega)\s.*",
"^(gran\sd)\s.*", "^(go\smart)\s.*", "^(gasolinera)\s.*", "^(fundacion)\s.*",
"^(fruteria)\s.*", "^(frutas\sy\sverduras)\s.*", "^(frutas\sy\slegumbres)\s.*",
"^(frutas\sy\sabarrotes)\s.*", "^(fma)\s.*", "^(fiesta\sinn)\s.*", "^(ferreteria)\s.*",
"^(farmacon)\s.*", "^(farmacias)\s.*", "^(farmacia\syza)\s.*",
"^(farmacia\smoderna)\s.*", "^(farmacia\slopez)\s.*",
"^(farmacia\sissste)\s.*", "^(farmacia\sisseg)\s.*", "^(farmacia\sguadalajara)\s.*",
"^(farmacia\sesquivar)\s.*", "^(farmacia\scalderon)\s.*", "^(farmacia\sbenavides)\s.*",
"^(farmacia\sabc)\s.*", "^(farmacia)\s.*", "^(farm\sguadalajara)\s.*",
"^(facultad\sde)\s.*", "^(f\sgdl)\s.*", "^(expendio)\s.*", "^(expendio\sde\span)\s.*",
"^(expendio\sde\shuevo)\s.*", "^(expendio\sbimbo)\s.*", "^(expendedoras\sautomaticas)\s.*",
"^(estic)\s.*", "^(estancia\sinfantil)\s.*", "^(estacionamiento)\s.*", "^(estanquillo)\s.*",
"^(estacion\sde\sservicio)\s.*", "^(establecimientos?)\s.*",
"^(escuela\suniversidad|esc\suniversidad)\s.*", "^(escuela\stelesecundaria|esc\stelesecundaria)\s.*",
"^(escuela\stecnica|esc\stecnica)\s.*",
"^(escuela\ssuperior|esc\ssuperior)\s.*", "^(escuela\ssecundaria\stecnica|esc\ssecundaria\stecnica)\s.*",
"^(escuela\ssecundaria\sgeneral|esc\ssecundaria\sgeneral)\s.*",
"^(escuela\ssecundaria\sfederal|esc\ssecundaria\sfederal)\s.*",
"^(escuela\ssecundaria|esc\ssecundaria)\s.*", "^(escuela\sprimaria|esc\sprimaria)\s.*",
"^(escuela\spreparatoria|esc\spreparatoria)\s.*", "^(escuela\snormal|esc\snormal)\s.*",
"^(escuela\sinstituto|esc\sinstituto)\s.*", "^(esc\sprepa|esc\sprep)\s.*",
"^(escuela\scolegio|esc\scolegio)\s.*", "^(escuela|esc)\s.*", "^(dunosusa)\s.*",
"^(ferreteria)\s.*", "^(dulces)\s.*", "^(dulceria)\s.*", "^(dulce)\s.*", "^(distribuidora)\s.*",
"^(diconsa)\s.*", "^(deposito)\s.*", "^(del\srio)\s.*", "^(cyber)\s.*", "^(cremeria)\s.*",
"^(cosina\seconomica)\s.*", "^(copy).*", "^(consumo|consumos)\s.*","^(conalep)\s.*",
"^(comercializadora)\s.*", "^(comercial\ssuper\salianza)\s.*",
"^(comercial\smexicana)\s.*", "^(comedor)\s.*", "^(colegio\sde\sbachilleres)\s.*",
"^(colegio)\s.*", "^(coffe).*", "^(cocteleria|cockteleria)\s.*", "^(cocina\seconomica)\s.*",
"^(cocina)\s.*", "^(cobaev)\s.*", "^(cobaes)\s.*", "^(cobaeh)\s.*", "^(cobach)\s.*",
"^(club\sde\sgolf)\s.*", "^(club\scampestre)\s.*", "^(city\sclub)\s.*", "^(circulo\sk)\s.*",
"^(cinepolis)\s.*", "^(cinemex)\s.*", "^(cinemas)\s.*", "^(cinemark)\s.*", "^(ciber)\s.*",
"^(church|churchs)\s.*", "^(chilis)\s.*", "^(chiles\sy\ssemillas)\s.*", "^(chiles\ssecos)\s.*",
"^(chedraui)\s.*", "^(cetis)\s.*", "^(cervefrio)\s.*", "^(cervefiesta)\s.*",
"^(cerveceria)\s.*", "^(cervecentro)\s.*", "^(centro\sescolar)\s.*", "^(centro\seducativo)\s.*",
"^(centro\sde\sestudios)\s.*", "^(centro\scomercial)\s.*", "^(central\sde\sautobuses)\s.*",
"^(cecytem)\s.*", "^(cecytec)\s.*", "^(cecyte)\s.*", "^(cbtis)\s.*", "^(cbta)\s.*", "^(cbt)\s.*",
"^(caseta\stelefonica)\s.*", "^(caseta)\s.*", "^(casa\sley)\s.*", "^(casa\shernandez)\s.*",
"^(cartonero\scentral)\s.*", "^(carniceria)\s.*", "^(carne\smart)\s.*", "^(calimax)\s.*",
"^(cajero)\s.*", "^(cafeteria)\s.*", "^(cafe)\s.*", "^(burritos)\s.*",
"^(burguer\sking|burger\sking)\s.*", "^(bip)\s.*", "^(bimbo\sexpendio)\s.*",
"^(burguer|burger)\s.*", "^(ba.os)\s.*", "^(bae)\s.*", "^(bachilleres)\s.*", "^(bachillerato)\s.*",
"^(autosercivio|auto\sservicio)\s.*", "^(autolavado|auto\slavado)\s.*",
"^(autobuses\sla\spiedad|autobuses\sde\sla\piedad)\s.*", "^(arrachera)\s.*",
"^(alsuper\sstore)\s.*", "^(alsuper)\s.*", "^(academia)\s.*", "^(abts)\s.*",
"^(abarrotera\slagunitas)\s.*", "^(abarrotera)\s.*", "^(abarrotes\sy\svinos)\s.*",
"^(abarrotes\sy\sverduras)\s.*", "^(abarrotes\sy\ssemillas)\s.*",
"^(abarrotes\sy\spapeleria)\s.*", "^(abarrotes\sy\snovedades)\s.*", "^(abarrotes\sy\sfruteria)\s.*",
"^(abarrotes\sy\sdeposito)\s.*", "^(abarrotes\sy\scremeria)\s.*", "^(abarrotes\sy\scarniceria)\s.*",
"^(abarrotes\svinos\sy\slicores)\s.*", "^(abarrote|abarrotes|abarotes|abarr|aba|ab)\s.*",
"^(7\seleven)\s.*", "^(7\s24)\s.*"]
client_df["NombreCliente2"] = client_df["NombreCliente"]
for var in special_list:
client_df[var] = client_df["NombreCliente"].str.extract(var, expand=False).str.upper()
replace = client_df.loc[~client_df[var].isnull(), var]
client_df.loc[~client_df[var].isnull(),"NombreCliente2"] = replace
client_df.drop(var, axis=1, inplace=True)
client_df.drop("NombreCliente", axis=1, inplace=True)
client_df.to_csv("../data/cliente_tabla2.csv.gz", compression="gzip", index=False)
def client_anaylsis2():
"""
    The idea here is to unify the client IDs of several different customers into broader
    categories, using a different approach from client_anaylsis()
"""
client_df = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
# clean duplicate spaces in client names
client_df["NombreCliente"] = client_df["NombreCliente"].str.upper()
client_df["NombreCliente"] = client_df["NombreCliente"].apply(lambda x: " ".join(x.split()))
client_df = client_df.drop_duplicates(subset="Cliente_ID")
# --- Begin Filtering for specific terms
# Note that the order of filtering is significant.
# For example:
# The regex of .*ERIA.* will assign "FRUITERIA" to 'Eatery' rather than 'Fresh Market'.
# In other words, the first filters to occur have a bigger priority.
def filter_specific(vf2):
# Known Large Company / Special Group Types
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*REMISION.*', 'Consignment')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*WAL MART.*', '.*SAMS CLUB.*'], 'Walmart', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*OXXO.*', 'Oxxo Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*CONASUPO.*', 'Govt Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*BIMBO.*', 'Bimbo Store')
# General term search for a random assortment of words I picked from looking at
# their frequency of appearance in the data and common spanish words for these categories
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COLEG.*', '.*UNIV.*', '.*ESCU.*', '.*INSTI.*', \
'.*PREPAR.*'], 'School', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*PUESTO.*', 'Post')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*FARMA.*', '.*HOSPITAL.*', '.*CLINI.*', '.*BOTICA.*'],
'Hospital/Pharmacy', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*CAFE.*', '.*CREMERIA.*', '.*DULCERIA.*', \
'.*REST.*', '.*BURGER.*', '.*TACO.*', '.*TORTA.*', \
                                                            '.*TAQUER.*', '.*HOT DOG.*', '.*PIZZA.*', \
'.*COMEDOR.*', '.*ERIA.*', '.*BURGU.*'], 'Eatery',
regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*SUPER.*', 'Supermarket')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COMERCIAL.*', '.*BODEGA.*', '.*DEPOSITO.*', \
'.*ABARROTES.*', '.*MERCADO.*', '.*CAMBIO.*', \
'.*MARKET.*', '.*MART .*', '.*MINI .*', \
'.*PLAZA.*', '.*MISC.*', '.*ELEVEN.*', '.*EXP.*', \
'.*SNACK.*', '.*PAPELERIA.*', '.*CARNICERIA.*', \
'.*LOCAL.*', '.*COMODIN.*', '.*PROVIDENCIA.*'
], 'General Market/Mart' \
, regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*VERDU.*', '.*FRUT.*'], 'Fresh Market', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*HOTEL.*', '.*MOTEL.*', ".*CASA.*"], 'Hotel', regex=True)
filter_specific(client_df)
# --- Begin filtering for more general terms
# The idea here is to look for names with particles of speech that would
# not appear in a person's name.
# i.e. "Individuals" should not contain any participles or numbers in their names.
def filter_participle(vf2):
vf2['NombreCliente'] = vf2['NombreCliente'].replace([
'.*LA .*', '.*EL .*', '.*DE .*', '.*LOS .*', '.*DEL .*', '.*Y .*', '.*SAN .*', '.*SANTA .*', \
'.*AG .*', '.*LAS .*', '.*MI .*', '.*MA .*', '.*II.*', '.*[0-9]+.*' \
], 'Small Franchise', regex=True)
filter_participle(client_df)
# Any remaining entries should be "Individual" Named Clients, there are some outliers.
# More specific filters could be used in order to reduce the percentage of outliers in this final set.
def filter_remaining(vf2):
def function_word(data):
# Avoid the single-words created so far by checking for upper-case
if (data.isupper()) and (data != "NO IDENTIFICADO"):
return 'Individual'
else:
return data
vf2['NombreCliente'] = vf2['NombreCliente'].map(function_word)
filter_remaining(client_df)
client_df.rename(columns={"NombreCliente": "client_name3"}, inplace=True)
client_df.to_csv("../data/cliente_tabla3.csv.gz", compression="gzip", index=False)
def preprocess(save=False):
start = time.time()
dtype_dict = {"Semana": np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8,
'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16,
'Demanda_uni_equil': np.uint32, "Venta_hoy": np.float32, "Venta_uni_hoy": np.uint32,
"Dev_uni_proxima": np.uint32, "Dev_proxima": np.float32}
train = pd.read_csv("../data/train.csv.zip", compression="zip", dtype=dtype_dict)
test = pd.read_csv("../data/test.csv.zip", compression="zip", dtype=dtype_dict)
# train = train.sample(100000)
# test = test.sample(100000)
# We calculate out-of-sample mean features from most of the training data and only train from the samples in week 9.
# Out-of-sample mean features for training are calculated from all weeks before week 9 and for the test set from
# all weeks including week 9
mean_dataframes = {}
mean_dataframes["train"] = train[train["Semana"]<9].copy()
mean_dataframes["test"] = train.copy()
print("complete train obs: {}".format(len(train)))
print("train week 9 obs: {}".format(len(train[train["Semana"] == 9])))
train = train[train["Semana"] == 9]
# not used in later stages. Was used to find the right hyperparameters for XGBoost. After finding them and to
# obtain the best solution the evaluation data was incorporated into the training data and the hyperparameters
# were used "blindly"
# eval = train.iloc[int(len(train) * 0.75):, :].copy()
# print("eval obs: {}".format(len(eval)))
# mean_dataframes["eval"] = mean_dataframes["test"].iloc[:eval.index.min(), :].copy()
# train = train.iloc[:int(len(train) * 0.75), :]
# print("train obs: {}".format(len(train)))
# read data files and create new client ids
town = pd.read_csv("../data/town_state.csv.zip", compression="zip")
product = pd.read_csv("../data/producto_tabla.csv.zip", compression="zip")
client = pd.read_csv("../data/cliente_tabla.csv.zip", compression="zip")
client2 = pd.read_csv("../data/cliente_tabla2.csv.gz")
client2.rename(columns={"NombreCliente2": "client_name2"}, inplace=True)
client3 = pd.read_csv("../data/cliente_tabla3.csv.gz")
print("Reading data took {:.1f}min".format((time.time()-start)/60))
new_start = time.time()
# Feature Extraction
prod_split = product.NombreProducto.str.split(r"(\s\d+\s?(kg|Kg|g|G|in|ml|pct|p|P|Reb))")
product["product"] = prod_split.apply(lambda x: x[0])
product["brand2"] = product.NombreProducto.str.extract("^.+\s(\D+) \d+$", expand=False)
product['brand'] = prod_split.apply(lambda x: x[-1]).str.split().apply(lambda x: x[:-1])
product['num_brands'] = product.brand.apply(lambda x: len(x))
product['brand'] = prod_split.apply(lambda x: x[-1]).str.split().apply(lambda x: x[:-1]).astype("str")
product['short_name'] = product['product'].str.split(r'[A-Z][A-Z]').apply(lambda x: x[0])
product["beverage"] = product.NombreProducto.str.extract("\d+(ml)", expand=False)
product.loc[product["beverage"].notnull(), "beverage"] = 1
product["beverage"] = | pd.to_numeric(product["beverage"]) | pandas.to_numeric |
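# --- Hedged worked example of the product-name parsing above (the product name below is
# --- invented for illustration; `re` is imported here only for the demo) ---
import re
# For "Pan Blanco 640g BIM 1234" the split pattern yields
# ['Pan Blanco', ' 640g', 'g', ' BIM 1234'], so product -> "Pan Blanco",
# the weight token -> "640g" and brand -> ["BIM"].
print(re.split(r"(\s\d+\s?(kg|Kg|g|G|in|ml|pct|p|P|Reb))", "Pan Blanco 640g BIM 1234"))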
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 18:51:11 2019
@author: Meagatron
"""
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt,mpld3
from collections import defaultdict
from sklearn.metrics.pairwise import euclidean_distances
from flask import Flask, render_template, request
import math
import itertools
"""------------- Initialization ------------- """
start=10
end=18
window_size=end-start
skip_offset=2#int(window_size/2)
y_alphabet_size=4
word_lenth=3
ham_distance=1
epsilon = 1e-6
"""------------- import Data -------------"""
data = | pd.read_csv('ecg.csv', sep=',', header=None) | pandas.read_csv |
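# --- Minimal SAX-style discretization sketch using the parameters above (an assumption
# --- about the intended pipeline; breakpoints come from standard-normal quantiles) ---
from scipy.stats import norm
import string
def sax_word(segment, alphabet_size=y_alphabet_size, word_length=word_lenth):
    seg = (np.asarray(segment, dtype=float) - np.mean(segment)) / (np.std(segment) + epsilon)  # z-normalise
    usable = len(seg) // word_length * word_length
    paa = seg[:usable].reshape(word_length, -1).mean(axis=1)            # piecewise aggregate approximation
    breakpoints = norm.ppf(np.linspace(0, 1, alphabet_size + 1)[1:-1])  # 3 cut points for a 4-letter alphabet
    return "".join(string.ascii_lowercase[int(s)] for s in np.digitize(paa, breakpoints))
# e.g. sax_word(data.iloc[0, start:end].values) maps one window to a 3-letter word over 'a'..'d'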
'''
Created on Aug 24, 2018
@author: Prashant.Pal
'''
import pandas as pd
import matplotlib.pyplot as plt
mylist=['A','B','C','D','E']
mydict = {'X':[1,2,3,4,5],'Y':[20,30,40,50,60],'Z':[50,60,70,80,90]}
mydict1 = {0:[1,2,3,4,5],1:[20,30,40,50,60],2:[50,60,70,80,90]}
#print(mylist[1:])
df = pd.DataFrame(mydict)
print(df)
df1 = | pd.DataFrame(mydict1) | pandas.DataFrame |
"""
Created by: <NAME>
Sep 7
IEEE Fraud Detection Model
- Add back ids
- Add V Features
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import sys
import matplotlib.pylab as plt
from sklearn.model_selection import KFold
from datetime import datetime
import time
import logging
from sklearn.metrics import roc_auc_score
from catboost import CatBoostClassifier, Pool
from timeit import default_timer as timer
start = timer()
##################
# PARAMETERS
###################
run_id = "{:%m%d_%H%M}".format(datetime.now())
KERNEL_RUN = False
MODEL_NUMBER = os.path.basename(__file__).split('.')[0]
if KERNEL_RUN:
INPUT_DIR = '../input/champs-scalar-coupling/'
FE_DIR = '../input/molecule-fe024/'
FOLDS_DIR = '../input/champs-3fold-ids/'
TARGET = "isFraud"
N_ESTIMATORS = 100000
N_META_ESTIMATORS = 500000
LEARNING_RATE = 0.1
VERBOSE = 1000
EARLY_STOPPING_ROUNDS = 500
RANDOM_STATE = 529
N_THREADS = 48
DEPTH = 7
N_FOLDS = 5
MODEL_TYPE = "catboost"
#####################
## SETUP LOGGER
#####################
def get_logger():
"""
credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480
"""
os.environ["TZ"] = "US/Eastern"
time.tzset()
FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log')
formatter = logging.Formatter(FORMAT)
handler.setFormatter(formatter)
# logger.addHandler(handler)
logger.addHandler(fhandler)
return logger
logger = get_logger()
logger.info(f'Running for Model Number {MODEL_NUMBER}')
##################
# PARAMETERS
###################
if MODEL_TYPE == 'xgboost':
EVAL_METRIC = "AUC"
elif MODEL_TYPE == 'lgbm':
EVAL_METRIC = 'AUC'
elif MODEL_TYPE == 'catboost':
EVAL_METRIC = "AUC"
##################
# TRACKING FUNCTION
###################
def update_tracking(run_id,
field,
value, csv_file="../tracking/tracking.csv", integer=False, digits=None, drop_incomplete_rows=False):
"""
Function to update the tracking CSV with information about the model
"""
try:
df = | pd.read_csv(csv_file, index_col=[0]) | pandas.read_csv |
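    # --- Hedged sketch of a plausible continuation (assumption, not the original code):
    # --- create the tracking table on first use, optionally round the value, then store
    # --- it under (run_id, field) and write the CSV back out.
    except FileNotFoundError:
        df = pd.DataFrame()
    if integer:
        value = round(value)
    elif digits is not None:
        value = round(value, digits)
    df.loc[run_id, field] = value
    df.to_csv(csv_file)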
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from matplotlib.font_manager import FontProperties
#------------------------------------------------------------------------------
# Code motivation ---------------------------------------------------------------
'This version of the code derives the hourly and seasonal thresholds of the reflectances'
'at the selected pixels every 15 minutes, because it uses the 2018 GOES data set, which is'
'the most complete one and allows the thresholds to be obtained seasonally. The old version'
'of this code, which derived them every 10 minutes over the experiment horizon, is kept in'
'the Backups_VersionesAtiguas_Codigos folder in case it needs to be consulted again.'
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## -------------------------------HOURS OF THE DAY TO WORK WITH----------------------------- ##
HI = '06:00'; HF = '17:59'
#################################################################################################
## -----------------LOADING THE RADIATION AND EXPERIMENT DATA----------------- ##
#################################################################################################
df_P975 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60012018.txt', parse_dates=[2])
df_P350 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60022018.txt', parse_dates=[2])
df_P348 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/60032018.txt', parse_dates=[2])
df_P975 = df_P975.set_index(["fecha_hora"])
df_P975.index = df_P975.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P975.index = df_P975.index.tz_localize(None)
df_P350 = df_P350.set_index(["fecha_hora"])
df_P350.index = df_P350.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P350.index = df_P350.index.tz_localize(None)
df_P348 = df_P348.set_index(["fecha_hora"])
df_P348.index = df_P348.index.tz_localize('UTC').tz_convert('America/Bogota')
df_P348.index = df_P348.index.tz_localize(None)
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ----------------RESTRICTING THE DATA TO VALID VALUES---------------- ##
'Since radiation is the variable of interest here, only radiation values'
'greater than 0 are kept when filtering the data.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) ]
df_P350 = df_P350[(df_P350['radiacion'] > 0) ]
df_P348 = df_P348[(df_P348['radiacion'] > 0) ]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
##----ADJUSTING THE OBSERVED RADIATION DATA TO THE DESIRED DATE RANGE-----##
def daterange(start_date, end_date):
    'Adjusts the dates for the Kumar model at hourly resolution. The start and'
    'end dates are str: %Y-%m-%d'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
fechas_975 = []
for i in daterange(df_P975.index[0].date().strftime("%Y-%m-%d"), (df_P975.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_975.append(i)
fechas_350 = []
for i in daterange(df_P350.index[0].date().strftime("%Y-%m-%d"), (df_P350.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_350.append(i)
fechas_348 = []
for i in daterange(df_P348.index[0].date().strftime("%Y-%m-%d"), (df_P348.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_348.append(i)
fi_m = min(fechas_975[0].month, fechas_350[0].month, fechas_348[0].month)
fi_d = min(fechas_975[0].day, fechas_350[0].day, fechas_348[0].day)
ff_m = min(fechas_975[-1].month, fechas_350[-1].month, fechas_348[-1].month)
ff_d = min(fechas_975[-1].day, fechas_350[-1].day, fechas_348[-1].day)
## -----------------------------AGGREGATING THE PYRANOMETER DATA EVERY 15 MINUTES ------------------------------ ##
df_P348_15m = df_P348.groupby(pd.Grouper(freq="15Min")).mean()
df_P350_15m = df_P350.groupby(pd.Grouper(freq="15Min")).mean()
df_P975_15m = df_P975.groupby(pd.Grouper(freq="15Min")).mean()
df_P348_15m = df_P348_15m.between_time(HI, HF)
df_P350_15m = df_P350_15m.between_time(HI, HF)
df_P975_15m = df_P975_15m.between_time(HI, HF)
df_P348_15m = df_P348_15m.loc[~df_P348_15m.index.duplicated(keep='first')]
df_P350_15m = df_P350_15m.loc[~df_P350_15m.index.duplicated(keep='first')]
df_P975_15m = df_P975_15m.loc[~df_P975_15m.index.duplicated(keep='first')]
####################################################################################
## ----------------READING THE GOES CH2 DATA FOR THE FULL GRID---------------- ##
####################################################################################
Rad = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_2018_2019CH2.npy')
fechas_horas = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_FechasHoras_Anio.npy')
df_fh = pd.DataFrame()
df_fh ['fecha_hora'] = fechas_horas
df_fh['fecha_hora'] = pd.to_datetime(df_fh['fecha_hora'], format="%Y-%m-%d %H:%M", errors='coerce')
df_fh.index = df_fh['fecha_hora']
w = pd.date_range(df_fh.index.min(), df_fh.index.max()).difference(df_fh.index)
df_fh = df_fh[df_fh.index.hour != 5]
#################################################################################################
##-------------------READING THE GOES CH2 DATA FOR EACH PIXEL--------------------------##
#################################################################################################
Rad_pixel_975 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix975_Anio.npy')
Rad_pixel_350 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix350_Anio.npy')
Rad_pixel_348 = np.load('/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_pix348_Anio.npy')
fechas_horas = df_fh['fecha_hora'].values
## -- Building the radiance DataFrames
Rad_df_975 = pd.DataFrame()
Rad_df_975['Fecha_Hora'] = fechas_horas
Rad_df_975['Radiacias'] = Rad_pixel_975
Rad_df_975['Fecha_Hora'] = pd.to_datetime(Rad_df_975['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_975.index = Rad_df_975['Fecha_Hora']
Rad_df_975 = Rad_df_975.drop(['Fecha_Hora'], axis=1)
Rad_df_975 = Rad_df_975.between_time('06:00', '18:00') ##--> Keep only daytime hours
Rad_df_975_h = Rad_df_975.groupby(pd.Grouper(freq="H")).mean()
Rad_df_350 = pd.DataFrame()
Rad_df_350['Fecha_Hora'] = fechas_horas
Rad_df_350['Radiacias'] = Rad_pixel_350
Rad_df_350['Fecha_Hora'] = pd.to_datetime(Rad_df_350['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_350.index = Rad_df_350['Fecha_Hora']
Rad_df_350 = Rad_df_350.drop(['Fecha_Hora'], axis=1)
Rad_df_350 = Rad_df_350.between_time('06:00', '18:00') ##--> Keep only daytime hours
Rad_df_350_h = Rad_df_350.groupby(pd.Grouper(freq="H")).mean()
Rad_df_348 = pd.DataFrame()
Rad_df_348['Fecha_Hora'] = fechas_horas
Rad_df_348['Radiacias'] = Rad_pixel_348
Rad_df_348['Fecha_Hora'] = pd.to_datetime(Rad_df_348['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_348.index = Rad_df_348['Fecha_Hora']
Rad_df_348 = Rad_df_348.drop(['Fecha_Hora'], axis=1)
Rad_df_348 = Rad_df_348.between_time('06:00', '18:00') ##--> Keep only daytime hours
Rad_df_348_h = Rad_df_348.groupby(pd.Grouper(freq="H")).mean()
def time_mod(time, delta, epoch=None):
if epoch is None:
epoch = datetime.datetime(1970, 1, 1, tzinfo=time.tzinfo)
return (time - epoch) % delta
def time_round(time, delta, epoch=None):
mod = time_mod(time, delta, epoch)
if mod < (delta / 2):
return time - mod
return time + (delta - mod)
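# --- Worked example of the rounding helpers above (timestamps chosen for illustration) ---
# With a 15-minute delta, 10:07 rounds down to 10:00 and 10:08 rounds up to 10:15:
# >>> time_round(datetime.datetime(2018, 3, 1, 10, 7), datetime.timedelta(minutes=15))
# datetime.datetime(2018, 3, 1, 10, 0)
# >>> time_round(datetime.datetime(2018, 3, 1, 10, 8), datetime.timedelta(minutes=15))
# datetime.datetime(2018, 3, 1, 10, 15)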
Rad_df_348.index = [time_round(Rad_df_348.index[i], datetime.timedelta(minutes=15)) for i in range(len(Rad_df_348.index))]
Rad_df_350.index = [time_round(Rad_df_350.index[i], datetime.timedelta(minutes=15)) for i in range(len(Rad_df_350.index))]
Rad_df_975.index = [time_round(Rad_df_975.index[i], datetime.timedelta(minutes=15)) for i in range(len(Rad_df_975.index))]
Rad_df_348 = Rad_df_348.loc[~Rad_df_348.index.duplicated(keep='first')]
Rad_df_350 = Rad_df_350.loc[~Rad_df_350.index.duplicated(keep='first')]
Rad_df_975 = Rad_df_975.loc[~Rad_df_975.index.duplicated(keep='first')]
##----------------------------------RESTRICTING TO 2018 DATA ONLY---------------------------------##
Rad_df_975 = Rad_df_975[Rad_df_975.index.year==2018]
Rad_df_350 = Rad_df_350[Rad_df_350.index.year==2018]
Rad_df_348 = Rad_df_348[Rad_df_348.index.year==2018]
###############################################################################################################################
## ---------------------------------READING THE THEORETICAL (KUMAR) RADIATION DATA----------------------------------------- ##
###############################################################################################################################
def daterange(start_date, end_date):
    'Adjusts the dates for the Kumar model every 10 min. The start and end dates are str: %Y-%m-%d'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=10)
while start_date <= end_date:
yield start_date
start_date += delta
def serie_Kumar_Model(estacion):
    'Returns an hourly DataFrame with the theoretical radiation following the Kumar recommendations, produced by <NAME> ' \
    'for AMVA and his thesis. The original DataFrame is reordered into 12 ascending months (2018), although they may ' \
    'belong to different years. The result is for the selected point and uses the Total_Timeseries.csv file. Update the year'
data_Model = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Radiacion_GIS/Teoricos_Totales/Total_Timeseries_Rad_2018.csv',
sep=',')
fecha_hora = [pd.to_datetime(data_Model['Unnamed: 0'], format="%Y-%m-%d %H:%M:%S")[i].to_pydatetime() for i in
range(len(data_Model['Unnamed: 0']))]
data_Model.index = fecha_hora
data_Model = data_Model.sort_index()
data_Model['Month'] = np.array(data_Model.index.month)
data_Model = data_Model.sort_values(by="Month")
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if estacion == '6001':
punto = data_Model['TS_kumar']
elif estacion == '6002':
punto = data_Model['CI_kumar']
elif estacion == '6003':
punto = data_Model['JV_kumar']
Rad_teorica = []
for i in range(len(fechas)):
mes = fechas[i].month
hora = fechas[i].hour
mint = fechas[i].minute
rad = \
np.where((data_Model.index.month == mes) & (data_Model.index.hour == hora) & (data_Model.index.minute == mint))[
0]
if len(rad) == 0:
Rad_teorica.append(np.nan)
else:
Rad_teorica.append(punto.iloc[rad].values[0])
data_Theorical = pd.DataFrame()
data_Theorical['fecha_hora'] = fechas
data_Theorical['Radiacion'] = Rad_teorica
data_Theorical.index = data_Theorical['fecha_hora']
data_Theorical = data_Theorical[data_Theorical['Radiacion'] > 0]
data_hourly_theoric = data_Theorical.groupby(pd.Grouper(freq="H")).mean()
return data_hourly_theoric, data_Theorical
df_hourly_theoric_348, df_Theorical_348 = serie_Kumar_Model('6003')
df_hourly_theoric_350, df_Theorical_350 = serie_Kumar_Model('6002')
df_hourly_theoric_975, df_Theorical_975 = serie_Kumar_Model('6001')
######################################################################################################################
## -----------------------------RESTRICTING THE THEORETICAL RADIATION TO THE OBSERVED RADIATION PERIOD------------------------------ ##
######################################################################################################################
df_hourly_theoric_348 = df_hourly_theoric_348[(df_hourly_theoric_348.index >= '2018-'+'0'+str(df_P348.index.month[0])
+'-'+str(df_P348.index.day[0])) & (df_hourly_theoric_348.index <= '2018-'+str(df_P348.index.month[-1])
+'-'+str(df_P348.index.day[-1]))]
df_hourly_theoric_350 = df_hourly_theoric_350[(df_hourly_theoric_350.index >= '2018-'+'0'+str(df_P350.index.month[0])
+'-'+str(df_P350.index.day[0])) & (df_hourly_theoric_350.index <= '2018-'+str(df_P350.index.month[-1])
+'-'+str(df_P350.index.day[-1]))]
df_hourly_theoric_975 = df_hourly_theoric_975[(df_hourly_theoric_975.index >= '2018-'+'0'+str(df_P975.index.month[0])
+'-'+str(df_P975.index.day[0])) & (df_hourly_theoric_975.index <= '2018-'+str(df_P975.index.month[-1])
+'-'+str(df_P975.index.day[-1]))]
df_Theorical_348 = df_Theorical_348[(df_Theorical_348.index >= '2018-'+'0'+str(df_P348.index.month[0])
+'-'+str(df_P348.index.day[0])) & (df_Theorical_348.index <= '2018-'+str(df_P348.index.month[-1])
+'-'+str(df_P348.index.day[-1]))]
df_Theorical_350 = df_Theorical_350[(df_Theorical_350.index >= '2018-'+'0'+str(df_P350.index.month[0])
+'-'+str(df_P350.index.day[0])) & (df_Theorical_350.index <= '2018-'+str(df_P350.index.month[-1])
+'-'+str(df_P350.index.day[-1]))]
df_Theorical_975 = df_Theorical_975[(df_Theorical_975.index >= '2018-'+'0'+str(df_P975.index.month[0])
+'-'+str(df_P975.index.day[0])) & (df_Theorical_975.index <= '2018-'+str(df_P975.index.month[-1])
+'-'+str(df_P975.index.day[-1]))]
df_Theorical_348.index = [time_round(df_Theorical_348.index[i], datetime.timedelta(minutes=15)) for i in range(len(df_Theorical_348.index))]
df_Theorical_350.index = [time_round(df_Theorical_350.index[i], datetime.timedelta(minutes=15)) for i in range(len(df_Theorical_350.index))]
df_Theorical_975.index = [time_round(df_Theorical_975.index[i], datetime.timedelta(minutes=15)) for i in range(len(df_Theorical_975.index))]
df_Theorical_348 = df_Theorical_348.drop(['fecha_hora'], axis=1)
df_Theorical_348 = df_Theorical_348.loc[~df_Theorical_348.index.duplicated(keep='first')]
df_Theorical_350 = df_Theorical_350.drop(['fecha_hora'], axis=1)
df_Theorical_350 = df_Theorical_350.loc[~df_Theorical_350.index.duplicated(keep='first')]
df_Theorical_975 = df_Theorical_975.drop(['fecha_hora'], axis=1)
df_Theorical_975 = df_Theorical_975.loc[~df_Theorical_975.index.duplicated(keep='first')]
##################################################################################################################
## -----------------------------RESULTS ANALYSIS AND THRESHOLD DETERMINATION------------------------------ ##
##################################################################################################################
'Clear-sky reflectances are determined with the pyranometer every 15 minutes. To detect the cloudy reflectances '
'at each point, the derivatives are used, discriminating between morning and afternoon. For now they are set with the maximum.'
df_result_348 = | pd.concat([df_P348_15m, Rad_df_348, df_Theorical_348], axis=1) | pandas.concat |
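# --- Hedged sketch (assumption, not the original code): one simple way to flag cloudy
# --- 15-minute steps, as described above, is a threshold on the absolute change of the
# --- GOES reflectance, computed separately for morning and afternoon.
_deriv_348 = df_result_348['Radiacias'].diff().abs()
_thresh_348_am = _deriv_348.between_time('06:00', '11:45').max()  # morning threshold
_thresh_348_pm = _deriv_348.between_time('12:00', '17:45').max()  # afternoon threshold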
import requests
from typing import List
import re
# from nciRetriever.updateFC import updateFC
# from nciRetriever.csvToArcgisPro import csvToArcgisPro
# from nciRetriever.geocode import geocodeSites
# from nciRetriever.createRelationships import createRelationships
# from nciRetriever.zipGdb import zipGdb
# from nciRetriever.updateItem import update
# from nciRetriever.removeTables import removeTables
from datetime import date
import pandas as pd
import logging
from urllib.parse import urljoin
import json
import time
import sys
import os
from pprint import pprint
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
today = date.today()
# nciThesaurus = pd.read_csv('thesaurus.csv')
# uniqueMainDiseasesDf = pd.read_csv('nciUniqueMainDiseasesReference.csv')
# uniqueSubTypeDiseasesDf = pd.read_csv('nciUniqueSubTypeDiseasesReference.csv')
# uniqueDiseasesWithoutSynonymsDf = pd.read_csv('nciUniqueDiseasesWithoutSynonymsReference.csv')
def createTrialDict(trial: dict) -> dict:
trialDict = {'nciId': trial['nci_id'],
'protocolId': trial['protocol_id'],
'nctId': trial['nct_id'],
'detailDesc': trial['detail_description'],
'officialTitle': trial['official_title'],
'briefTitle': trial['brief_title'],
'briefDesc': trial['brief_summary'],
'phase': trial['phase'],
'leadOrg': trial['lead_org'],
'amendmentDate': trial['amendment_date'],
'primaryPurpose': trial['primary_purpose'],
'currentTrialStatus': trial['current_trial_status'],
'startDate': trial['start_date']}
if 'completion_date' in trial.keys():
trialDict.update({'completionDate': trial['completion_date']})
if 'active_sites_count' in trial.keys():
trialDict.update({'activeSitesCount': trial['active_sites_count']})
if 'max_age_in_years' in trial['eligibility']['structured'].keys():
trialDict.update({'maxAgeInYears': int(trial['eligibility']['structured']['max_age_in_years'])})
if 'min_age_in_years' in trial['eligibility']['structured'].keys():
trialDict.update({'minAgeInYears': int(trial['eligibility']['structured']['min_age_in_years']) if trial['eligibility']['structured']['min_age_in_years'] is not None else None})
if 'gender' in trial['eligibility']['structured'].keys():
trialDict.update({'gender': trial['eligibility']['structured']['gender']})
if 'accepts_healthy_volunteers' in trial['eligibility']['structured'].keys():
trialDict.update({'acceptsHealthyVolunteers': trial['eligibility']['structured']['accepts_healthy_volunteers']})
if 'study_source' in trial.keys():
trialDict.update({'studySource': trial['study_source']})
if 'study_protocol_type' in trial.keys():
trialDict.update({'studyProtocolType': trial['study_protocol_type']})
if 'record_verification_date' in trial.keys():
trialDict.update({'recordVerificationDate': trial['record_verification_date']})
return trialDict
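# --- Hedged usage sketch: a minimal fake record with just the keys createTrialDict()
# --- touches, to show the flattening it performs (all field values are invented) ---
_fake_trial = {
    'nci_id': 'NCI-2099-00001', 'protocol_id': 'P-0001', 'nct_id': 'NCT00000000',
    'detail_description': '...', 'official_title': '...', 'brief_title': '...',
    'brief_summary': '...', 'phase': 'I', 'lead_org': 'Example Org',
    'amendment_date': None, 'primary_purpose': 'TREATMENT',
    'current_trial_status': 'Active', 'start_date': '2099-01-01',
    'eligibility': {'structured': {}},
}
# pprint(createTrialDict(_fake_trial))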
def createSiteDict(trial:dict, site:dict) -> dict:
siteDict = {'nciId': trial['nci_id'],
'orgStateOrProvince': site['org_state_or_province'],
'contactName': site['contact_name'],
'contactPhone': site['contact_phone'],
'recruitmentStatusDate': site['recruitment_status_date'],
'orgAddressLine1': site['org_address_line_1'],
'orgAddressLine2': site['org_address_line_2'],
'orgVa': site['org_va'],
'orgTty': site['org_tty'],
'orgFamily': site['org_family'],
'orgPostalCode': site['org_postal_code'],
'contactEmail': site['contact_email'],
'recruitmentStatus': site['recruitment_status'],
'orgCity': site['org_city'],
'orgEmail': site['org_email'],
'orgCountry': site['org_country'],
'orgFax': site['org_fax'],
'orgPhone': site['org_phone'],
'orgName': site['org_name']
}
# if 'org_coordinates' in site.keys():
# siteDict['lat'] = site['org_coordinates']['lat']
# siteDict['long'] = site['org_coordinates']['lon']
return siteDict
def createBiomarkersDicts(trial:dict, marker:dict) -> List[dict]:
parsedBiomarkers = []
for name in [*marker['synonyms'], marker['name']]:
biomarkerDict = {
'nciId': trial['nci_id'],
'nciThesaurusConceptId': marker['nci_thesaurus_concept_id'],
'name': name,
'assayPurpose': marker['assay_purpose']
}
if 'eligibility_criterion' in marker.keys():
biomarkerDict.update({'eligibilityCriterion': marker['eligibility_criterion']})
if 'inclusion_indicator' in marker.keys():
biomarkerDict.update({'inclusionIndicator': marker['inclusion_indicator']})
parsedBiomarkers.append(biomarkerDict)
return parsedBiomarkers
def createMainBiomarkersDict(trial:dict, marker:dict) -> dict:
parsedBiomarker = {
'nciId': trial['nci_id'],
'nciThesaurusConceptId': marker['nci_thesaurus_concept_id'],
'name': marker['name'],
'assayPurpose': marker['assay_purpose'],
}
if 'eligibility_criterion' in marker.keys():
parsedBiomarker.update({'eligibilityCriterion': marker['eligibility_criterion']})
if 'inclusion_indicator' in marker.keys():
parsedBiomarker.update({'inclusionIndicator': marker['inclusion_indicator']})
return parsedBiomarker
def createDiseasesDicts(trial:dict, disease:dict) -> List[dict]:
parsedDiseases = []
try:
names = [disease['name']]
if 'synonyms' in disease.keys():
names.extend(disease['synonyms'])
except KeyError:
logger.error(f'Invalid key for diseases. Possible keys: {disease.keys()}')
return parsedDiseases
for name in names:
diseaseDict = {
'inclusionIndicator': disease['inclusion_indicator'],
'isLeadDisease': disease['is_lead_disease'],
'name': name,
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'nciId': trial['nci_id']
}
parsedDiseases.append(diseaseDict)
return parsedDiseases
def createMainToSubTypeRelDicts(trial:dict, disease:dict) -> List[dict]:
if 'subtype' not in disease['type']:
return []
relDicts = []
for parent in disease['parents']:
relDicts.append({
'maintype': parent,
'subtype': disease['nci_thesaurus_concept_id']
})
return relDicts
def createDiseasesWithoutSynonymsDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueDiseasesWithoutSynonymsDf.loc[uniqueDiseasesWithoutSynonymsDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# logger.error('Disease not found in full reference. Aborting insertion...')
# return {}
# # logger.debug(correctDisease['name'].values[0])
# # time.sleep(2)
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for main diseases. Not adding to list...')
return {}
def createMainDiseasesDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueMainDiseasesDf.loc[uniqueMainDiseasesDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# return {}
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
# if 'type' not in disease.keys():
# return {}
if 'maintype' not in disease['type']:
return {}
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for main diseases. Not adding to list...')
return {}
def createSubTypeDiseasesDict(trial:dict, disease:dict) -> dict:
# diseaseDict = {
# 'nciId': trial['nci_id'],
# 'inclusionIndicator': disease['inclusion_indicator'],
# 'isLeadDisease': disease['is_lead_disease'],
# 'nciThesaurusConceptId': disease['nci_thesaurus_concept_id']
# }
# correctDisease = uniqueSubTypeDiseasesDf.loc[uniqueSubTypeDiseasesDf['nciThesaurusConceptId'] == disease['nci_thesaurus_concept_id']]
# if correctDisease.empty:
# return {}
# diseaseDict.update({
# 'name': correctDisease['name'].values[0]
# })
# return diseaseDict
# if 'type' not in disease.keys():
# return {}
if 'subtype' not in disease['type']:
return {}
try:
return {
'nciId': trial['nci_id'],
'name': disease['name'],
'isLeadDisease': disease['is_lead_disease'],
'nciThesaurusConceptId': disease['nci_thesaurus_concept_id'],
'inclusionIndicator': disease['inclusion_indicator']
}
except KeyError:
logger.error('Invalid key for subtype diseases. Not adding to list...')
return {}
def createArmsDict(trial:dict, arm:dict) -> dict:
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
return {
'nciId': trial['nci_id'],
'name': arm['name'],
'nciIdWithName': f'{trial["nci_id"]}_{parsedArm}',
'description': arm['description'],
'type': arm['type']
}
def createInterventionsDicts(trial:dict, arm:dict) -> List[dict]:
parsedInterventions = []
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
for intervention in arm['interventions']:
names = intervention['synonyms']
if 'name' in intervention.keys():
names.append(intervention['name'])
elif 'intervention_name' in intervention.keys():
names.append(intervention['intervention_name'])
for name in names:
try:
interventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['intervention_type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': name,
'category': intervention['category'],
'nciThesaurusConceptId': intervention['intervention_code'],
'description': intervention['intervention_description']
}
except KeyError:
try:
interventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': name,
'category': intervention['category'],
'nciThesaurusConceptId': intervention['nci_thesaurus_concept_id'],
'description': intervention['description']
}
except KeyError as e:
logger.exception(e)
logger.error(f'Invalid intervention keys. Possible keys are: {intervention.keys()}')
continue
parsedInterventions.append(interventionDict)
return parsedInterventions
def createMainInterventionDicts(trial:dict, arm:dict) -> List[dict]:
parsedArm = re.sub(r'\(.+\)', '', arm['name'])
parsedArm = re.sub(r'\s+', '_', parsedArm.strip())
parsedMainInterventions = []
for intervention in arm['interventions']:
try:
mainInterventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['intervention_type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': intervention['intervention_name'],
'category': intervention['category'],
'nciThesaurusConceptId': intervention['intervention_code'],
'description': intervention['intervention_description']
}
except KeyError:
try:
mainInterventionDict = {
'nciId': trial['nci_id'],
'arm': arm['name'],
'nciIdWithArm': f'{trial["nci_id"]}_{parsedArm}',
'type': intervention['type'],
'inclusionIndicator': intervention['inclusion_indicator'],
'name': intervention['name'],
'category': intervention['category'],
'nciThesaurusConceptId': intervention['nci_thesaurus_concept_id'],
'description': intervention['description']
}
except KeyError:
logger.error(f'Unexpected intervention keys: {intervention.keys()}. Not inserting...')
continue
parsedMainInterventions.append(mainInterventionDict)
return parsedMainInterventions
def deDuplicateTable(csvName:str, deduplicationList:List[str]):
df = pd.read_csv(csvName)
df.drop_duplicates(subset=deduplicationList, inplace=True)
    df.to_csv(csvName, index=False)
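# Usage sketch (file name and key columns below are placeholders): drop repeated rows
# from one of the generated CSVs in place before the unique-table files are built.
# Example:
#     deDuplicateTable('nciBiomarkers2023-01-01.csv',
#                      ['nciId', 'nciThesaurusConceptId', 'name'])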
def correctMainToSubTypeTable(today):
mainDf = pd.read_csv(f'nciUniqueMainDiseases{today}.csv')
subTypeDf = pd.read_csv(f'nciUniqueSubTypeDiseases{today}.csv')
relDf = pd.read_csv(f'MainToSubTypeRelTable{today}.csv')
for idx, row in relDf.iterrows():
parentId = row['maintype']
if parentId in mainDf['nciThesaurusConceptId'].values:
continue
elif parentId in subTypeDf['nciThesaurusConceptId'].values:
while True:
possibleMainTypesDf = relDf[relDf['subtype'] == parentId]
if possibleMainTypesDf.empty:
logger.error(f'Parent {parentId} not found in main diseases or subtype diseases')
parentId = ''
break
#setting the parentId value with the parent of the subtype found
for value in possibleMainTypesDf['maintype'].values:
if parentId == value:
continue
parentId = value
break
else:
logger.error(f'Parent {parentId} not found in main diseases or subtype diseases')
parentId = ''
break
# parentId = possibleMainTypesDf['maintype'].values[0]
if parentId in mainDf['nciThesaurusConceptId'].values:
break
if parentId == '':
continue
relDf.iloc[idx]['maintype'] = parentId
else:
pass
relDf.to_csv(f'MainToSubTypeRelTable{today}.csv', index=False)
# logger.error(f'maintype id {parentId} is not found in main diseases or subtype diseases')
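# Sketch of the walk above (concept ids are hypothetical): if the relation table holds
# C3 -> C2 and C2 -> C1, and only C1 is a true maintype, the row (maintype=C2, subtype=C3)
# is rewritten to (maintype=C1, subtype=C3); parents that cannot be resolved to a maintype
# are logged and the row is left unchanged.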
def createUniqueSitesCsv(today):
logger.debug('Reading sites...')
sitesDf = pd.read_csv(f'nciSites{today}.csv')
    logger.debug('Dropping duplicates and trial-dependent information...')
sitesDf.drop_duplicates(subset='orgName', inplace=True)
sitesDf.drop(['recruitmentStatusDate', 'recruitmentStatus', 'nciId'], axis=1, inplace=True)
logger.debug('Saving unique sites table...')
sitesDf.to_csv(f'nciUniqueSites{today}.csv', index=False)
def createUniqueDiseasesWithoutSynonymsCsv(today):
logger.debug('Reading diseases without synonyms...')
diseasesWithoutSynonymsDf = pd.read_csv(f'nciDiseasesWithoutSynonyms{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
diseasesWithoutSynonymsDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
diseasesWithoutSynonymsDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    diseasesWithoutSynonymsDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
diseasesWithoutSynonymsDf.to_csv(f'nciUniqueDiseasesWithoutSynonyms{today}.csv', index=False)
def createUniqueMainDiseasesCsv(today):
logger.debug('Reading main diseases...')
mainDiseasesDf = pd.read_csv(f'nciMainDiseases{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
mainDiseasesDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
mainDiseasesDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    mainDiseasesDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
mainDiseasesDf.to_csv(f'nciUniqueMainDiseases{today}.csv', index=False)
def createUniqueSubTypeDiseasesCsv(today):
    logger.debug('Reading subtype diseases...')
subTypeDiseasesDf = pd.read_csv(f'nciSubTypeDiseases{today}.csv')
logger.debug('Dropping duplicates and trial-dependent information...')
subTypeDiseasesDf.drop_duplicates(subset='nciThesaurusConceptId', inplace=True)
subTypeDiseasesDf.drop(['isLeadDisease', 'inclusionIndicator', 'nciId'], axis=1, inplace=True)
    subTypeDiseasesDf.dropna(inplace=True)
logger.debug('Saving unique diseases table...')
subTypeDiseasesDf.to_csv(f'nciUniqueSubTypeDiseases{today}.csv', index=False)
def createUniqueBiomarkersCsv(today):
logger.debug('Reading main biomarkers...')
    mainBiomarkersDf = pd.read_csv(f'nciMainBiomarkers{today}.csv')
"""
Functions to create charts.
"""
import altair as alt
import altair_saver
import os
import pandas as pd
from processing_utils import default_parameters
from processing_utils import utils
from IPython.display import display, SVG
alt.renderers.enable('altair_saver', fmts=['svg'])
def show_svg(image_name):
image_path = f"../notebooks/{image_name}.svg"
altair_saver.save(image_name, image_path)
display(SVG(filename = image_path))
os.remove(image_path)
#---------------------------------------------------------------#
# Chart parameters
#---------------------------------------------------------------#
navy = "#0A4C6A"
maroon = "#F3324C"
green = "#32D486"
orange = "#FCA800"
blue = "#1696D2"
gray = "#797C7C"
purple = "#6B1F84"
orange = "#F7AE1D"
yellow = "#D0E700"
# These colors are used for the shading on cases/deaths
light_gray = "#EAEBEB"
navy_outline = "#052838"
blue_outline = "#1277A5"
# Used on vaccinations charts
dark_gray = "#323434"
title_font_size = 10
font_name = "Arial"
grid_opacity = 0.4
domain_opacity = 0.4
stroke_opacity = 0
time_unit = "monthdate"
chart_width = 300
chart_height = 200
bin_spacing = 100
fulldate_format = "%-m/%-d/%y"
monthdate_format = "%-m/%-d"
two_weeks_ago = default_parameters.two_weeks_ago
# If chart_width needs to be adjusted because of the legend
scaling_factor = 0.85
#---------------------------------------------------------------#
# Case Data (County, State, MSA, City of LA)
#---------------------------------------------------------------#
def setup_cases_deaths_chart(df, geog, name):
# Define chart titles
if geog == "county":
chart_title = f"{name} County"
if geog == "state":
chart_title = f"{name}"
if geog == "msa":
chart_title = f"{name} MSA"
# Add City of LA to this geog
if geog == "lacity":
chart_title = f"{name}"
    # Set the y-axis min/max: values occasionally dip below 0, which would be data errors.
    # Can't just set the min; both ends of the domain must be given.
    # (Alternatively, rows below 0 could be dropped or clipped to 0.)
df = df[(df.cases_avg7 > 0) & (df.deaths_avg7 > 0)]
cases_max = df.cases_avg7.max()
deaths_max = df.deaths_avg7.max()
# Set up base charts
base = (alt.Chart(
df.drop(columns = "date"))
.mark_line()
.encode(
x=alt.X("date2",
title="date", axis=alt.Axis(format=fulldate_format))
)
)
base_2weeks = (
alt.Chart(df[df.date >= two_weeks_ago].drop(columns = "date"))
.mark_line()
.encode(
x=alt.X("date2",
title="date", axis=alt.Axis(format=fulldate_format))
)
)
tier_base = (base.mark_line(strokeDash=[2,3], clip=True, tooltip=True))
# Make cases charts
cases_line = (
base
.mark_line(tooltip=True)
.encode(
y=alt.Y("cases_avg7:Q", title="7-day avg", scale=alt.Scale(domain=[0, cases_max])),
color=alt.value(navy),
tooltip=['county',
alt.Tooltip('date2:T', format=fulldate_format, title="date"),
alt.Tooltip('cases_avg7:Q', format=',.2f'),
]
)
)
cases_shaded = (
base_2weeks
.mark_area()
.encode(
y=alt.Y("cases_avg7:Q", title="7-day avg"),
color=alt.value(light_gray)
)
)
tier1_hline = (
tier_base
.encode(y=alt.Y("tier1_case_cutoff:Q"),
color=alt.value(orange),
tooltip=alt.Tooltip("tier1_case_cutoff", format=',.2f'))
)
tier2_hline = (
tier_base
.encode(y=alt.Y("tier2_case_cutoff:Q"),
color=alt.value(maroon),
tooltip=alt.Tooltip("tier2_case_cutoff", format=',.2f'))
)
tier3_hline = (
tier_base
.encode(y=alt.Y("tier3_case_cutoff:Q"),
color=alt.value(purple),
tooltip=alt.Tooltip("tier3_case_cutoff", format=',.2f'))
)
cases_chart = (
(cases_shaded + cases_line +
tier1_hline + tier2_hline + tier3_hline)
.properties(
title=f"{chart_title}: New Cases", width=chart_width, height=chart_height
)
)
# Make deaths chart
deaths_line = (
base
.mark_line(tooltip=True)
.encode(
y=alt.Y("deaths_avg7:Q", title="7-day avg", scale=alt.Scale(domain=[0, deaths_max])),
color=alt.value(blue),
tooltip=['county',
alt.Tooltip('date2:T', format=fulldate_format, title="date"),
alt.Tooltip('deaths_avg7:Q', format=',.2f')]
)
)
deaths_shaded = (
base_2weeks
.mark_area()
.encode(
y=alt.Y("deaths_avg7:Q", title="7-day avg"),
color=alt.value(light_gray)
)
)
deaths_chart = (
(deaths_shaded + deaths_line)
.properties(
title=f"{chart_title}: New Deaths", width=chart_width, height=chart_height
)
)
return cases_chart, deaths_chart
def configure_chart(chart):
chart = (chart
.configure_title(
fontSize=title_font_size, font=font_name, anchor="middle", color="black"
).configure_axis(gridOpacity=grid_opacity, domainOpacity=domain_opacity)
.configure_view(strokeOpacity=stroke_opacity)
)
return chart
def make_cases_deaths_chart(df, geog, name):
cases_chart, deaths_chart = setup_cases_deaths_chart(df, geog, name)
# Cases and deaths chart to display side-by-side
combined_chart = alt.hconcat(cases_chart, deaths_chart)
combined_chart = configure_chart(combined_chart)
show_svg(combined_chart)
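# Usage sketch (geography and name are placeholders): `df` must already carry the columns
# referenced in setup_cases_deaths_chart (county, date, date2, cases_avg7, deaths_avg7 and
# the tier cutoff columns) for a single geography.
# Example:
#     make_cases_deaths_chart(county_df, geog="county", name="Los Angeles")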
#---------------------------------------------------------------#
# Testing Data (LA County and City of LA)
#---------------------------------------------------------------#
def make_la_testing_chart(df, plot_col, chart_title, lower_bound, upper_bound):
chart_width = 900
bar = (
alt.Chart(df)
.mark_bar(color=navy)
.encode(
x=alt.X(
"date2:T",
title="date",
axis=alt.Axis(format=fulldate_format),
),
y=alt.Y(plot_col, title="# Tests"),
)
)
line1 = (
alt.Chart(pd.DataFrame({"y": [lower_bound]}))
.mark_rule(color=maroon, strokeDash=[6, 3])
.encode(y="y")
)
line2 = (
alt.Chart(pd.DataFrame({"y": [upper_bound]}))
.mark_rule(color=maroon, strokeDash=[6, 3])
.encode(y="y")
)
testing_chart = (
(bar + line1 + line2)
.properties(title=chart_title, width=chart_width)
)
testing_chart = configure_chart(testing_chart)
show_svg(testing_chart)
#---------------------------------------------------------------#
# Share of Positive Tests by Week (LA County)
#---------------------------------------------------------------#
def make_la_positive_test_chart(df, positive_lower_bound, positive_upper_bound,
testing_lower_bound, testing_upper_bound,
chart_title1, chart_title2):
num_weeks = len(df.week2.unique())
chart_width = num_weeks * 15
base = (alt.Chart(df)
.mark_bar(binSpacing = bin_spacing)
.encode(
x=alt.X("week2", title="date", sort=None)
)
)
positive_bar = (
base
.mark_bar(color = navy)
.encode(
y=alt.Y("pct_positive", title="Percent",
axis=alt.Axis(format="%")
),
)
)
positive_lower_line = (
alt.Chart(pd.DataFrame({"y": [positive_lower_bound]}))
.mark_rule(color=maroon, strokeDash=[6, 3])
.encode(y="y")
)
positive_upper_line = (
alt.Chart(pd.DataFrame({"y": [positive_upper_bound]}))
.mark_rule(color=maroon, strokeDash=[6, 3])
.encode(y="y")
)
positive_chart = (
(positive_bar + positive_lower_line + positive_upper_line)
.properties(title=chart_title1, width = chart_width)
)
test_bar = (
base
.mark_bar(color = blue)
.encode(
y=alt.Y("weekly_tests", title="# Weekly Tests",),
)
)
num_positive_bar = (
base
.mark_bar(color = gray)
.encode(
y=alt.Y("weekly_cases", title="# Weekly Tests",),
)
)
weekly_test_lower_line = (
alt.Chart(pd.DataFrame({"y": [testing_lower_bound * 7]}))
.mark_rule(color=maroon, strokeDash=[6, 3])
.encode(y="y")
)
weekly_test_upper_line = (
alt.Chart(pd.DataFrame({"y": [testing_upper_bound * 7]}))
.mark_rule(color=maroon, strokeDash=[6, 3])
.encode(y="y")
)
test_chart = (
(test_bar + num_positive_bar + weekly_test_lower_line + weekly_test_upper_line)
.properties(title=chart_title2, width = chart_width)
)
combined_weekly_chart = alt.hconcat(positive_chart, test_chart)
combined_weekly_chart = configure_chart(combined_weekly_chart)
show_svg(combined_weekly_chart)
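# Usage sketch (bounds and titles are placeholders): draws the weekly share-positive chart
# next to the weekly testing-volume chart, with guide lines at the supplied thresholds
# (the daily testing bounds are multiplied by 7 inside the function).
# Example:
#     make_la_positive_test_chart(weekly_df, 0.04, 0.08, 3000, 5000,
#                                 "Share of Positive Tests by Week", "Weekly Tests")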
#---------------------------------------------------------------#
# Hospital Equipment Availability (LA County)
#---------------------------------------------------------------#
def base_hospital_chart(df):
chart = (alt.Chart(df)
.mark_line()
.encode(
x=alt.X("date2:T", title="date",
axis=alt.Axis(format=fulldate_format))
)
)
return chart
def make_lacounty_hospital_chart(df):
chart_width = 350
acute_color = green
icu_color = navy
ventilator_color = orange
skeleton = base_hospital_chart(df)
base = (skeleton.encode(
y=alt.Y("pct_available_avg3", title="3-day avg",
axis=alt.Axis(format="%")
),
color=alt.Color(
"equipment",
scale=alt.Scale(
domain=["Acute Care Beds", "ICU Beds", "Ventilators"],
range=[acute_color, icu_color, ventilator_color],
),
),
)
)
line1 = (
            alt.Chart(pd.DataFrame({"y": [0.3]}))
# Copyright 2022 Accenture Global Solutions Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import dataclasses as dc
import typing as tp
import datetime as dt
import decimal
import platform
import pyarrow as pa
import pyarrow.compute as pc
import pandas as pd
import tracdap.rt.metadata as _meta
import tracdap.rt.exceptions as _ex
import tracdap.rt.impl.util as _util
@dc.dataclass(frozen=True)
class DataSpec:
data_item: str
data_def: _meta.DataDefinition
storage_def: _meta.StorageDefinition
schema_def: tp.Optional[_meta.SchemaDefinition]
@dc.dataclass(frozen=True)
class DataPartKey:
@classmethod
def for_root(cls) -> DataPartKey:
return DataPartKey(opaque_key='part_root')
opaque_key: str
@dc.dataclass(frozen=True)
class DataItem:
schema: pa.Schema
table: tp.Optional[pa.Table] = None
batches: tp.Optional[tp.List[pa.RecordBatch]] = None
pandas: tp.Optional[pd.DataFrame] = None
pyspark: tp.Any = None
@dc.dataclass(frozen=True)
class DataView:
trac_schema: _meta.SchemaDefinition
arrow_schema: pa.Schema
parts: tp.Dict[DataPartKey, tp.List[DataItem]]
@staticmethod
def for_trac_schema(trac_schema: _meta.SchemaDefinition):
arrow_schema = DataMapping.trac_to_arrow_schema(trac_schema)
return DataView(trac_schema, arrow_schema, dict())
class _DataInternal:
@staticmethod
def float_dtype_check():
if "Float64Dtype" not in pd.__dict__:
raise _ex.EStartup("TRAC D.A.P. requires Pandas >= 1.2")
class DataMapping:
"""
Map primary data between different supported data frameworks, preserving equivalent data types.
DataMapping is for primary data, to map metadata types and values use
:py:class:`TypeMapping <tracdap.rt.impl.type_system.TypeMapping>` and
:py:class:`TypeMapping <tracdap.rt.impl.type_system.MetadataCodec>`.
"""
__log = _util.logger_for_namespace(_DataInternal.__module__ + ".DataMapping")
# Matches TRAC_ARROW_TYPE_MAPPING in ArrowSchema, tracdap-lib-data
__TRAC_DECIMAL_PRECISION = 38
__TRAC_DECIMAL_SCALE = 12
__TRAC_TIMESTAMP_UNIT = "ms"
__TRAC_TIMESTAMP_ZONE = None
__TRAC_TO_ARROW_BASIC_TYPE_MAPPING = {
_meta.BasicType.BOOLEAN: pa.bool_(),
_meta.BasicType.INTEGER: pa.int64(),
_meta.BasicType.FLOAT: pa.float64(),
_meta.BasicType.DECIMAL: pa.decimal128(__TRAC_DECIMAL_PRECISION, __TRAC_DECIMAL_SCALE),
_meta.BasicType.STRING: pa.utf8(),
_meta.BasicType.DATE: pa.date32(),
_meta.BasicType.DATETIME: pa.timestamp(__TRAC_TIMESTAMP_UNIT, __TRAC_TIMESTAMP_ZONE)
}
# Check the Pandas dtypes for handling floats are available before setting up the type mapping
__PANDAS_FLOAT_DTYPE_CHECK = _DataInternal.float_dtype_check()
__PANDAS_DATETIME_TYPE = pd.to_datetime([]).dtype
# Only partial mapping is possible, decimal and temporal dtypes cannot be mapped this way
__ARROW_TO_PANDAS_TYPE_MAPPING = {
pa.bool_(): pd.BooleanDtype(),
pa.int8(): pd.Int8Dtype(),
pa.int16(): pd.Int16Dtype(),
pa.int32(): pd.Int32Dtype(),
pa.int64(): pd.Int64Dtype(),
pa.uint8(): pd.UInt8Dtype(),
pa.uint16(): pd.UInt16Dtype(),
pa.uint32(): pd.UInt32Dtype(),
pa.uint64(): pd.UInt64Dtype(),
pa.float16(): pd.Float32Dtype(),
        pa.float32(): pd.Float32Dtype(),
import pandas as pd
from pandas.api.types import pandas_dtype  # public home of pandas_dtype (avoid the private _libs import)
import logging
def input_df_contract(contract_params: dict, df_param=None):
"""
This decorator allow to check properties of a df transformation
Args:
df_param: name of the param of the function that is the input df
contract_params: dict defining the contract of the function in the following format:
contract_dict = {
"col_additions": {
"col_a": "int",
"col_b": "float"
},
"col_deletions": {
"col_c",
"col_d"
},
"col_editions": {
"col_e",
"col_f"
},
"allow_index_edition": False,
"allow_drop_record": True
}
which means that the function must create "col_a", "col_b", delete "col_c", "col_d", must
not modify any column data except "col_e", "col_f", and must not edit the index
here is the list of keys allowed in this dict:
col_additions: dict where keys are column names and values are dtypes (string)
col_deletions: set of str representing the deleted columns
col_editions: set of str representing the modified columns
allow_index_edition: bool indicating if the function modify the index
allow_add_drop_record (bool): indicate if the function can drop some records (ex. when dropna is used)
usage:
when you have a function that takes a df as input::
def super_func(df_input):
just add the annotation to automatically check properties::
@input_df_contract(df_param="df_input", contract_dict={"col_editions": {"col_e","col_f"}})
def super_func(df_input):
"""
contract = DataframeContract(**contract_params)
def func_decorator(func):
# we need to get the name of the param and it position
# if df_param is not set the first positional argument will be assumed
if df_param is not None:
# as df_param can be passed as args or kwargs, we need to know it's position in *args
df_param_name = df_param
df_param_idx = func.__code__.co_varnames.index(df_param)
# func.__code__.co_varnames : tuple of func's params name ordered
# .index(df_param) location of df_param in this tuple
else:
df_param_name = func.__code__.co_varnames[0]
df_param_idx = 0
def func_wrapper(*args, **kwargs):
# check if df_param is passed as *args or **kwargs
if df_param_name in kwargs.keys():
df_in = kwargs[df_param_name] # passed as kwargs
else:
df_in = args[df_param_idx] # passed as args
# call the function
df_out = func(*args, **kwargs)
# check the contract
contract.check_contract(df_in, df_out)
return df_out
return func_wrapper
return func_decorator
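# Minimal usage sketch (toy transform, not from the original code base): the wrapped
# function promises to add an integer column "col_a", edit nothing else and keep the
# index intact; check_contract() then verifies this on every call.
#
# @input_df_contract(contract_params={"col_additions": {"col_a": "int"},
#                                     "allow_index_edition": False},
#                    df_param="df_input")
# def add_constant_column(df_input):
#     out = df_input.copy()
#     out["col_a"] = 1
#     return out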
class DataframeContract:
def __init__(self, col_additions: dict = None, col_deletions: {str} = None, col_editions: {str} = None,
allow_index_edition: bool = False, allow_add_drop_record: bool = True):
"""
Define a DataFrame transform contract.
Args:
col_additions: dict where keys are column names and values are dtypes (string)
col_deletions: set of str representing the deleted columns
col_editions: set of str representing the modified columns
allow_index_edition: bool indicating if the function modify the index
allow_add_drop_record (bool): indicate if the function can drop some records (ex. when dropna is used)
"""
self.col_additions = col_additions if (col_additions is not None) else dict()
for (key, value) in self.col_additions.items():
self.col_additions[key] = pandas_dtype(value)
self.col_deletions = col_deletions if (col_deletions is not None) else set()
self.col_editions = col_editions if (col_editions is not None) else set()
self.allow_index_edition = allow_index_edition
self.allow_add_drop_record = allow_add_drop_record
def check_contract(self, df_in: pd.DataFrame, df_out: pd.DataFrame):
"""
Check if the two dataFrame respect the contract
Args:
df_in: input of the Transform
df_out: result of the transformation
Returns: None
Raises: an error if one of the conditions of the contract if violated
"""
logger = logging.getLogger(__name__)
dtypes = df_out.dtypes
# check additions
success = self.check_values(expected=set(self.col_additions.keys()),
real=(set(df_out.columns) - set(df_in.columns)),
message="columns addition")
# check additions dtypes
success = success and self.check_values(real=list(dtypes.loc[self.col_additions.keys()]),
expected=list(self.col_additions.values()),
message="column addition dtypes")
# check deletions
success = success and self.check_values(real=(set(df_in.columns) - set(df_out.columns)),
expected=self.col_deletions,
message="columns deletion")
# check index edition
if not self.allow_index_edition:
try:
            pd.testing.assert_index_equal(df_in.index, df_out.index)
#!/Library/Frameworks/Python.framework/Versions/3.6/bin/python3
#
# Author: <NAME>
# Date: 2018-09-26
#
# This script runs all the models on the Baxter dataset subset of only cancer and normal samples to predict diagnosis based on OTU data only. This script only evaluates generalization performance of the model.
#
############################# IMPORT MODULES ##################################
import matplotlib
matplotlib.use('Agg') #use Agg backend to be able to use Matplotlib in Flux
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sympy
from scipy import interp
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from timeit import default_timer as timer
#################################################################################
############################# PRE-PROCESS DATA ##################################
# Import the module I wrote preprocess_data
# In this case we will only need the function process_multidata, which preprocesses the shared and subsampled mothur-generated OTU table and the metadata. This function will give us OTUs and FIT as features and diagnosis as labels.
# If we wanted to use only OTUs and not FIT as a feature, import the function process_data and use that.
#################################################################################
from preprocess_data import process_SRNdata
shared = pd.read_table("data/baxter.0.03.subsample.shared")
meta = pd.read_table("data/metadata.tsv")
# Define x (features) and y (labels)
x, y = process_SRNdata(shared, meta)
# When we use process_multidata:
# x: all the OTUs and FIT as features
# y: labels which are diagnosis of patient (0 is for non-advanced adenomas+normal colon and 1 is for advanced adenomas+carcinomas)
############################ MODEL SELECTION ####################################
# Import the module I wrote model_selection and function select_model
# This function will define the cross-validation method, hyper-parameters to tune and the modeling method based on which models we want to use here.
#################################################################################
from model_selection import select_model
# Define the models you want to use
models = ["L2_Logistic_Regression", "L1_SVM_Linear_Kernel", "L2_SVM_Linear_Kernel", "SVM_RBF", "Random_Forest", "Decision_Tree", "XGBoost"]
############################ TRAINING THE MODEL ###############################
## We will split the dataset 80%-20% and tune hyper-parameters on the 80% training split to choose the best model and best hyper-parameters. The chosen best model and hyper-parameters will be tested on the 20% test set that was not seen during training. This gives a TEST AUC. This is repeated 100 times and will give 100 TEST AUCs. We call this the outer cross-validation/testing.
## To tune the hyper-parameter we also use an inner cross validation that splits to 80-20 and repeats for 100 times. We report Cross-Validation AUC for this inner cross-validation.
## Here we use a for loop to iterate each model method.
#################################################################################
walltimes = []
for models in models:
start = timer()
print(models)
## Generate empty lists to fill with AUC values for test-set
tprs_test = []
aucs_test = []
mean_fpr_test = np.linspace(0, 1, 100)
## Generate empty lists to fill with AUC values for train-set cv
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
## Generate empty lists to fill with hyper-parameter and mean AUC
scores = []
names = []
    ## Define how many times we will iterate the outer cross-validation
i=0
epochs= 100
for epoch in range(epochs):
i=i+1
print(i)
## Split dataset to 80% training 20% test sets.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=True, stratify = y)
sc = MinMaxScaler(feature_range=(0, 1))
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
## Define which model, parameters we want to tune and their range, and also the inner cross validation method(n_splits, n_repeats)
model, param_grid, cv = select_model(models)
## Based on the chosen model, create a grid to search for the optimal model
grid = GridSearchCV(estimator = model, param_grid = param_grid, cv = cv, scoring = 'roc_auc', n_jobs=-1)
## Get the grid results and fit to training set
grid_result = grid.fit(x_train, y_train)
## Print out the best model chosen in the grid
print('Best model:', grid_result.best_estimator_)
## Print out the best hyper-parameters chosen in the grid
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
## Calculate the AUC means and standard deviation for each hyper-parameters used during tuning. Print this out.
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# Save the AUC means for each tested hyper-parameter tuning. Append for each outer cross validaion (epoch)
# We want to plot this to see our hyper-parameter tuning performance and budget
scores.append(means)
names.append(i)
parameters=pd.DataFrame(params)
## The best model we pick here will be used for predicting test set.
best_model = grid_result.best_estimator_
## variable assignment to make it easier to read.
X=x_train
Y=y_train
## Calculate the FPR and TPR at each inner-cross validation and append these data to plot ROC curve for cross-validation with n_splits=5 and n_repeats=100 to evaluate the variation of prediction in our training set.
for train, test in cv.split(X,Y):
if models=="L2_Logistic_Regression" or models=="Random_Forest" or models=="XGBoost" or models=="Decision_Tree":
y_score = best_model.fit(X[train], Y[train]).predict_proba(X[test])
fpr, tpr, thresholds = roc_curve(Y[test], y_score[:, 1])
else:
y_score = best_model.fit(X[train], Y[train]).decision_function(X[test])
fpr, tpr, thresholds = roc_curve(Y[test], y_score)
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
print("Train", roc_auc)
## Calculate the FPR and TPR at each outer-cross validation and append these data to plot ROC curve for testing during 100 repeats(epochs) to evaluate the variation of prediction in our testing set.
if models=="L2_Logistic_Regression" or models=="Random_Forest" or models=="XGBoost" or models=="Decision_Tree":
y_score = best_model.fit(x_train, y_train).predict_proba(x_test)
# Compute ROC curve and area the curve
fpr_test, tpr_test, thresholds_test = roc_curve(y_test, y_score[:, 1])
else:
y_score = best_model.fit(x_train, y_train).decision_function(x_test)
# Compute ROC curve and area the curve
fpr_test, tpr_test, thresholds_test = roc_curve(y_test, y_score)
tprs_test.append(interp(mean_fpr_test, fpr_test, tpr_test))
tprs_test[-1][0] = 0.0
roc_auc_test = auc(fpr_test, tpr_test)
aucs_test.append(roc_auc_test)
print("Test", roc_auc_test)
## Plot the ROC curve for inner and outer cross-validation
plt.plot([0, 1], [0, 1], linestyle='--', color='green', label='Random', alpha=.8)
mean_tpr_test = np.mean(tprs_test, axis=0)
mean_tpr_test[-1] = 1.0
mean_auc_test = auc(mean_fpr_test, mean_tpr_test)
std_auc_test = np.std(aucs_test)
plt.plot(mean_fpr_test, mean_tpr_test, color='r', label=r'Never-before-seen test set ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc_test, std_auc_test), lw=2, alpha=.8)
std_tpr_test = np.std(tprs_test, axis=0)
tprs_upper_test = np.minimum(mean_tpr_test + std_tpr_test, 1)
tprs_lower_test = np.maximum(mean_tpr_test - std_tpr_test, 0)
plt.fill_between(mean_fpr_test, tprs_lower_test, tprs_upper_test, color='tomato', alpha=.2, label=r'$\pm$ 1 std. dev.')
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b', label=r'Mean cross-val ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc), lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='dodgerblue', alpha=.2, label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC for %s' % models)
plt.legend(loc="lower right", fontsize=8)
save_results_to = 'results/figures/all_samples/'
plt.savefig(save_results_to + str(models) + ".png", format="PNG", dpi=1000)
plt.clf()
# Save the CV-auc and Test-auc lists to a dataframe and then to a tab-delimited file
cv_aucs= {'AUC':aucs}
cv_aucs_df= pd.DataFrame(cv_aucs)
test_aucs = {'AUC':aucs_test}
test_aucs_df = pd.DataFrame(test_aucs)
    concat_aucs_df = pd.concat([cv_aucs_df,test_aucs_df], axis=1, keys=['Cross-validation','Testing'])
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pandas as pd
from AShareData import AShareDataReader, constants, SHSZTradingCalendar, utils
from AShareData.config import get_db_interface
from AShareData.database_interface import DBInterface
from AShareData.factor import CompactFactor, ContinuousFactor
from AShareData.tickers import StockIndexFutureIndex
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
class MajorIndustryConstitutes(object):
def __init__(self, provider: str, level: int, cap: CompactFactor = None, db_interface: DBInterface = None):
self.db_interface = db_interface if db_interface else get_db_interface()
self.calendar = SHSZTradingCalendar(self.db_interface)
self.date = self.calendar.today()
self.data_reader = AShareDataReader(self.db_interface)
self.industry = self.data_reader.industry(provider=provider, level=level)
self.cap = cap if cap else self.data_reader.stock_free_floating_market_cap
def get_major_constitute(self, name: str, n: int = None):
if name not in self.industry.all_industries:
raise ValueError(f'unknown industry: {name}')
constitute = self.industry.list_constitutes(date=self.date, industry=name)
val = self.cap.get_data(ids=constitute, dates=self.date) / 1e8
if n:
val = val.sort_values(ascending=False)
val = val.head(n)
constitute = val.index.get_level_values('ID').tolist()
sec_name = self.data_reader.sec_name.get_data(ids=constitute, dates=self.date)
pe = self.data_reader.pe_ttm.get_data(ids=constitute, dates=self.date)
pb = self.data_reader.pb.get_data(ids=constitute, dates=self.date)
ret = pd.concat([sec_name, val, pe, pb], axis=1).sort_values(val.name, ascending=False)
return ret
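# Usage sketch (the industry name is a placeholder): list the ten largest free-floating
# constituents of a Shenwan level-2 industry as of today, requires a live DB connection.
# Example:
#     constitutes = MajorIndustryConstitutes(provider='申万', level=2)
#     constitutes.get_major_constitute('白酒II', n=10)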
class IndexHighlighter(object):
must_keep_indexes = ['全市场.IND', '全市场等权.IND', '次新股等权.IND', 'ST.IND']
def __init__(self, date: dt.datetime = None, db_interface: DBInterface = None):
self.db_interface = db_interface if db_interface else get_db_interface()
self.calendar = SHSZTradingCalendar(self.db_interface)
if date is None:
date = dt.datetime.combine(dt.date.today(), dt.time())
self.date = date
records = utils.load_excel('自编指数配置.xlsx')
self.tickers = [it['ticker'] for it in records]
self.tbd_indexes = list(set(self.tickers) - set(self.must_keep_indexes))
start_date = self.calendar.offset(date, -22)
index_factor = ContinuousFactor('自合成指数', '收益率', db_interface=self.db_interface)
self.cache = index_factor.get_data(start_date=start_date, end_date=date).unstack()
self.industry_cache = []
def featured_data(self, look_back_period: int, n: int) -> pd.DataFrame:
data = self.cache.iloc[-look_back_period:, :]
data = (data + 1).cumprod()
tmp = data.loc[data.index[-1], self.tbd_indexes].sort_values()
ordered_index = tmp.index.tolist()
cols = ordered_index[:n] + ordered_index[-n:]
self.industry_cache.extend(cols)
return data.loc[:, cols + self.must_keep_indexes] - 1
@staticmethod
def disp_data(data):
print(data.loc[data.index[-1], :].T.sort_values(ascending=False) * 100)
def plot_index(self, period: int, n: int, ax: plt.Axes = None):
plot_data = self.featured_data(period, n) * 100
if ax is None:
_, ax = plt.subplots(1, 1)
plot_data.plot(ax=ax)
ax.set_xlim(left=plot_data.index[0], right=plot_data.index[-1])
ax.grid(True)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
return ax
def summary(self):
for i, it in enumerate([(3, 3), (5, 3), (20, 3)]):
print(f'回溯{it[0]}天:')
self.disp_data(self.featured_data(it[0], it[1]))
print('')
self.plot_index(20, 3)
mentioned_industry = [it[2:-4] for it in set(self.industry_cache) if it.startswith('申万')]
constitute = MajorIndustryConstitutes(provider='申万', level=2)
for it in mentioned_industry:
print(f'申万2级行业 - {it}')
print(constitute.get_major_constitute(it, 10))
print('')
def major_index_valuation(db_interface: DBInterface = None):
if db_interface is None:
db_interface = get_db_interface()
data = db_interface.read_table('指数日行情', ['市盈率TTM', '市净率']).dropna(how='all')
tmp = data.groupby('ID').rank()
latest = data.groupby('ID').tail(1)
percentile = tmp.groupby('ID').tail(1) / tmp.groupby('ID').max()
percentile.columns = [f'{it}分位' for it in percentile.columns]
ret = pd.concat([latest, percentile], axis=1)
ret = ret.loc[:, sorted(ret.columns)].reset_index()
index_name_dict = dict(zip(constants.STOCK_INDEXES.values(), constants.STOCK_INDEXES.keys()))
ret['ID'] = ret['ID'].map(index_name_dict)
return ret.set_index(['DateTime', 'ID'])
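# Usage sketch: returns the latest PE-TTM / PB of the major stock indexes together with
# their historical percentiles, indexed by (DateTime, index name).
# Example:
#     print(major_index_valuation())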
class StockIndexFutureBasis(object):
FUTURE_INDEX_MAP = {'IH': '000016.SH', 'IF': '000300.SH', 'IC': '000905.SH'}
def __init__(self, date: dt.datetime = None, lookback_period: int = 5, db_interface: DBInterface = None):
super().__init__()
self.date = date if date else dt.datetime.combine(dt.date.today(), dt.time())
self.look_back_period = lookback_period
self.db_interface = db_interface if db_interface else get_db_interface()
self.data_reader = AShareDataReader(self.db_interface)
self.cal = SHSZTradingCalendar(self.db_interface)
self.stock_index_tickers = StockIndexFutureIndex(self.db_interface)
def compute(self) -> pd.DataFrame:
start_date = self.cal.offset(self.date, -self.look_back_period)
tickers = self.stock_index_tickers.ticker()
tickers_info = self.db_interface.read_table('期货合约', '最后交易日', ids=tickers).to_frame()
tickers_info['index_ticker'] = [self.FUTURE_INDEX_MAP[it[:2]] for it in tickers_info.index]
index_close = self.data_reader.index_close.get_data(start_date=start_date, end_date=self.date,
ids=list(self.FUTURE_INDEX_MAP.values())).reset_index()
future_close = self.data_reader.future_close.get_data(start_date=start_date, end_date=self.date,
ids=tickers).reset_index()
tmp = pd.merge(future_close, tickers_info, left_on='ID', right_index=True)
df = pd.merge(tmp, index_close, left_on=['DateTime', 'index_ticker'], right_on=['DateTime', 'ID']).rename(
{'ID_x': 'ID'}, axis=1)
        df['合约时长'] = (pd.to_datetime(df['最后交易日'])
import numpy as np
from os.path import join, isfile
from os import chdir, makedirs, remove
import pandas as pd
import matplotlib.pyplot as plt
from shutil import rmtree, copyfile
import subprocess
import glob
import os
def post_disc(exp_name, dtw_thr, zr_root):
"run conn-comp clustering"
chdir(zr_root)
command = './post_disc {} {}'.format(exp_name+'.lsh64', dtw_thr)
print(command)
subprocess.call(command.split())
results_path = join(zr_root, 'exp', exp_name + '.lsh64', 'results')
return results_path
def new_match_dict(last_filepair, new_line):
match = {'f1': last_filepair[0],
'f2': last_filepair[1],
'f1_start': int(new_line[0]),
'f1_end': int(new_line[1]),
'f2_start': int(new_line[2]),
'f2_end': int(new_line[3]),
'score': float(new_line[4])
}
if len(new_line) == 6:
match['rho'] = float(new_line[5])
return match
def read_matches_outfile(match_file_path):
min_frame_th = 5
matches_list = []
match_file = open(match_file_path, 'r')
last_filepair = match_file.readline().strip('\n').split(' ')
# print((last_filepair))
for i, line in enumerate(match_file):
new_line = line.strip('\n').split(' ')
if len(new_line) == 2:
last_filepair = new_line
elif len(new_line) == 6:
if ( (int(new_line[1])-int(new_line[0])) >= min_frame_th ) & ( (int(new_line[3])-int(new_line[2])) >= min_frame_th ):
matches_list.append(new_match_dict(last_filepair, new_line))
else:
print('ERROR: unexpected line: {}'.format(new_line))
# if i >9: break
match_file.close()
return matches_list
def get_matches_df(match_file_path):
# type: (str) -> pd.DataFrame
if 'out' not in match_file_path: # if exp_name is given
match_file_path = join(zr_root, 'exp', match_file_path + '.lsh64', 'matches', 'out.1')
matches_list = read_matches_outfile(match_file_path)
matches_df = pd.DataFrame.from_records(matches_list,
columns=['f1', 'f2', 'f1_start', 'f1_end', 'f2_start', 'f2_end', 'score', 'rho']
)
matches_df = matches_df.astype(dtype={'f1': str, 'f2': str,
'f1_start': int, 'f1_end': int, 'f2_start': int, 'f2_end': int,
'score': float, 'rho': float}
) # type: pd.DataFrame
# print(matches_df.head(3))
print('Read {} matches'.format(len(matches_df)))
return matches_df
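# Usage sketch (paths are placeholders): either an experiment name or a full path to a
# matches out-file works; an experiment name resolves to exp/<name>.lsh64/matches/out.1.
# Example:
#     matches = get_matches_df('my_exp')
#     matches = get_matches_df('/path/to/matches/out.1')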
def get_matches_all(exp_name, exp_root='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools/exp/'):
outdir_m = os.path.join(exp_root, '{}.lsh64/matches'.format(exp_name) )
outfiles = sorted(glob.glob(outdir_m + '/out.*'))
if (len(outfiles)) == 0:
print('DIRECTORY ERROR', outdir_m)
return 0
out_df_list = []
for name in outfiles:
out_df_list.append( get_matches_df(name) )
# append all matches df to a single df
matches_df = pd.concat(out_df_list, ignore_index=True)
return matches_df
def discovered_fragments(matches_df):
    F_disc = pd.DataFrame(columns=['filename', 'start', 'end'])
import pandas as pd # version 1.0.1
# in pandas 1.1.4 dates for INTESA and BMG don't work after merge in "final"
from datetime import datetime
# TODO find repetitions and replace them with functions
# for example Santander and CITI files import and adjustment
# or date and amount formatting
pd.options.display.float_format = '{:,.2f}'.format
# don't hide columns
pd.set_option('display.expand_frame_repr', False)
# import list of bank accounts into data frame
balances = pd.read_csv('list_of_accounts.csv', sep=';')
balances = balances.set_index('Account')
print('table with the list of all active bank accounts\n')
print(balances)
print('\nlist of columns\n')
print(balances.columns)
print()
# import bits of information from ING
ing = pd.read_csv('ING_transakcje_zamkniecie.csv', sep=';', encoding='ANSI',
usecols=['rachunek ING "NRB" (26 znaków)', 'saldo końcowe',
'waluta operacji', 'data wyciągu'])
print('\nWARNING: the ING file sometimes contains errors and the columns get misaligned. '
      'Fix it manually in Excel before loading.\n')
print('\nING data before adjustments\n')
print(ing)
ing = ing.rename(columns={'rachunek ING "NRB" (26 znaków)': "Account",
"saldo końcowe": "saldo",
'data wyciągu': 'data',
'waluta operacji': 'Currency'})
ing = ing.set_index('Account')
# amount format adjusted
# empty cells need te be removed before next steps to avoid errors
ing = ing.dropna(subset=['saldo'])
ing['saldo'] = ing['saldo'].apply(lambda x: x.replace(',', '.'))
ing['saldo'] = pd.to_numeric(ing['saldo'])
# date format adjusted
ing['data'] = pd.to_datetime(ing['data'], format='%y-%m-%d')
# sorting is necessary to catch the newest values
ing.sort_values(by=['data'], inplace=True, ascending=False)
print()
# index has to be removed for a while to delete the duplicates
ing = ing.reset_index().drop_duplicates(subset='Account',
keep='first').set_index('Account')
print('\nING data without duplicates, based on repeated index entries\n', ing, '\n')
# import bits of information from CITI bank
citifilename = 'CITI_salda_zamkniecie.csv'
colnames = ['Account', 'klient', 'saldo', 'Currency', 'data',
'nazwa_rach', 'nazawa_od', 'oddzial']
citi = pd.read_csv(citifilename, names=colnames, skiprows=1,
parse_dates=True, dayfirst=True)
citi = citi.drop(['klient', 'nazwa_rach', 'nazawa_od', 'oddzial'], axis=1)
# date format adjusted
citidtm = lambda x: datetime.strptime(str(x), "%d/%m/%Y") # 02/08/2019
citi['data'] = citi['data'].apply(citidtm)
citi['data'] = pd.to_datetime(citi['data'])
citi['Account'] = citi['Account'].apply(lambda x: x.replace(' ', ''))
citi = citi.set_index('Account')
print('\nchecking what is loaded from CITI\n', citi, '\n')
# import bits of information from Santander bank
# "skiprows" need to be updated if we close or open some bank accounts
santanderfilename = 'Santander_salda_zamkniecie.csv'
san = pd.read_csv(santanderfilename, skiprows=[0, 1, 17, 18, 19],
usecols=['Data', 'Numer rachunku', 'Saldo', 'Unnamed: 8'],
parse_dates=True, sep=';', encoding='ANSI', )
santandervatfilename = 'Santander_VAT_salda_zamkniecie.csv'
sanvat = pd.read_csv(santandervatfilename, skiprows=[0, 1, 6, 7, 8],
usecols=['Data', 'Numer rachunku', 'Saldo', 'Unnamed: 8'],
parse_dates=True, sep=';', encoding='ANSI', )
san_tot = pd.concat([san,sanvat])
san_tot = san_tot.rename(columns={'Numer rachunku': "Account",
"Saldo": "saldo",
'Data': 'data',
'Unnamed: 8': 'Currency'})
san_tot['saldo'] = san_tot['saldo'].apply(lambda x: x.replace(' ', ''))
san_tot['saldo'] = san_tot['saldo'].apply(lambda x: x.replace(',', '.'))
san_tot['saldo'] = pd.to_numeric(san_tot['saldo'])
san_tot['Account'] = san_tot['Account'].apply(lambda x: x.replace(' ', ''))
san_tot = san_tot.set_index('Account')
san_tot['data'] = pd.to_datetime(san_tot['data'], format='%Y-%m-%d')
# In Santander file the date is only in the first row.
# It must be added into the next rows
san_tot['data'] = san_tot['data'].fillna(method="ffill")
print()
print('checking what we have from Santander\n', san_tot, '\n')
# import bits of information from Santander bank
bmgfilename = 'BMG_salda_zamkniecie.csv'
bmg = pd.read_csv(bmgfilename, skiprows=range(0, 15),
usecols=['Account number', 'Currency',
'Closing', 'Closing book balance'],
parse_dates=True, sep=';', encoding='ANSI', )
bmg = bmg.rename(
columns={'Account number': "Account", "Closing book balance": "saldo",
'Closing': 'data'})
bmg = bmg.set_index('Account')
bmg['data'] = pd.to_datetime(bmg['data'],
format='%Y-%m-%d')
print('\nchecking what is loaded from BMG\n\n', bmg, '\n\n')
# import bits of information from INTESA bank
intesafilename = 'INTESA_salda_zamkniecie.csv'
intesa = pd.read_csv(intesafilename, parse_dates=True, sep=';', encoding='ANSI')
intesa = intesa.set_index('Account')
intesa['data'] = pd.to_datetime(intesa['data'], format='%Y-%m-%d')
"""local_mongo_sync_beta_app"""
#code='local_mongo_sync_beta_app'
#mongo_string='mongodb://root:[email protected]:27017/?authSource=admin&readPreference=primary&appname=MongoDB%20Compass&ssl=false'
#es_host = 'localhost:9200'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
#auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
"""local_mongo_sync_prod_app"""
#code='local_mongo_sync_prod_app'
#mongo_string='mongodb://root:[email protected]:27017/admin?authSource=admin&readPreference=primary&appname=MongoDB%20Compass%20Community&ssl=false'
#es_host = 'localhost:9200'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
#auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
"""dev_env_mongo_sync_beta_app"""
code='dev_env_mongo_sync_beta_app'
mongo_string='mongodb://root:[email protected]:27017/?authSource=admin&readPreference=primary&appname=MongoDB%20Compass&ssl=false'
es_host = 'elastic-helm-elasticsearch-coordinating-only'
from elasticsearch import Elasticsearch
es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
#
"""beta_env_mongo_sync_prod_app"""
#code='beta_env_mongo_sync_prod_app'
#mongo_string='mongodb://root:[email protected]:27017/admin?authSource=admin&readPreference=primary&appname=MongoDB%20Compass%20Community&ssl=false'
#es_host = 'elastic-helm-elasticsearch-coordinating-only'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
"""prod_env_mongo_sync_prod_app"""
#code='prod_env_mongo_sync_prod_app'
#mongo_string='mongodb://root:eXVB5mbmuZ@bighaat-mongo-mongodb-0.bighaat-mongo-mongodb-headless.kisanvedika.svc.cluster.local:27017,bighaat-mongo-mongodb-1.bighaat-mongo-mongodb-headless.kisanvedika.svc.cluster.local:27017,bighaat-mongo-mongodb-2.bighaat-mongo-mongodb-headless.kisanvedika.svc.cluster.local:27017/admin?authSource=admin&compressors=disabled&gssapiServiceName=mongodb'
#es_host = 'elastic-helm-elasticsearch-coordinating-only'
#from elasticsearch import Elasticsearch
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#auth={'apiKey': '135BE3F9-B1D8-4A14-A999-303286C32275'}
from pymongo import MongoClient
import datetime
import time
from elasticsearch import Elasticsearch
from bson.objectid import ObjectId
from elasticsearch.helpers import scan
#es = Elasticsearch([{'host': 'elastic-helm-elasticsearch-coordinating-only'}])
#es = Elasticsearch("elastic-helm-elasticsearch-coordinating-only")
#es = Elasticsearch("https://elastic:[email protected]:9200/", verify_certs=False, connection_class=RequestsHttpConnection)
#from pandas.io.json import json_normalize
import pandas as pd
import re
import numpy as np
from es_pandas import es_pandas
import warnings
warnings.filterwarnings("ignore")
#es_host = 'elastic-helm-elasticsearch-coordinating-only'
mongo_client = MongoClient(mongo_string)
db = mongo_client.bighaat
import json
import requests
def big_read(index):
es_response = scan(
es,
index=index,
doc_type='_doc',
query={"query": { "match_all" : {}}}
)
return es_response
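# Usage sketch (the index name is a placeholder): stream every document of an index with
# the scan helper instead of a single bounded search response.
# Example:
#     for hit in big_read('posts_sync'):
#         doc = hit['_source']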
def logger(message):
message_dict={'code':code,
'message':message}
r=requests.post('https://apibeta.bighaat.com/crop/api/logerror/create-error-log?message={}&api-version=1.0'.format(str(message_dict)),headers=auth)
return r.text
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
if isinstance(o, datetime.datetime):
return str(o)
return json.JSONEncoder.default(self, o)
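# Sketch: the encoder lets ObjectId and datetime values survive JSON encoding, which is
# what the per-document `es.index(..., body=JSONEncoder().encode(node))` calls rely on.
# Example (values are placeholders):
#     JSONEncoder().encode({'_id': ObjectId(), 'createdAt': datetime.datetime.utcnow()})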
#try:
# start_time = time.time()
# db = mongo_client.bighaat
# col = db.posts
# cursor = col.find()
# posts = list(cursor)
# status=[]
# for node in posts:
# _id = str(node['_id']).split("'")[0]
# node.pop('_id')
# res=es.index(index='posts_sync',id=_id,body=JSONEncoder().encode(node))
# status.append(res['_id'])
# print("Posts--- %s seconds ---" % (time.time() - start_time_posts))
# logger("Posts--- %s seconds ---" % (time.time() - start_time_posts))
#
# col = db.comments
# cursor = col.find()
# comments = list(cursor)
# status=[]
# for node in comments:
# _id = str(node['_id']).split("'")[0]
# node.pop('_id')
# res=es.index(index='comments_sync',id=_id,body=JSONEncoder().encode(node))
# status.append(res['_id'])
#
# col = db.crops
# cursor = col.find()
# crops = list(cursor)
# status=[]
# for node in crops:
# _id = str(node['_id']).split("'")[0]
# node.pop('_id')
# res=es.index(index='crops_sync',id=_id,body=JSONEncoder().encode(node))
# status.append(res['_id'])
#
# col = db.crop_doctor
# cursor = col.find()
# crops = list(cursor)
# status=[]
# for node in crops:
# _id = str(node['_id']).split("'")[0]
# node.pop('_id')
# res=es.index(index='crop_doc_sync',id=_id,body=JSONEncoder().encode(node))
# status.append(res['_id'])
# print('Done')
# print("--- %s Mongo Init seconds ---" % (time.time() - start_time))
# logger("--- %s Mongo Init seconds ---" % (time.time() - start_time))
#except Exception as e:
# logger('Error in Mongo Init '+str(e))
# print('Error in Mongo Init '+str(e))
while(1):
try:
start_time = time.time()
start_time_posts = time.time()
col = db.posts
cursor = col.find()
posts = list(cursor)
print(time.time()-start_time_posts)
posts_list=[]
for node in posts:
posts_list.append(json.loads(JSONEncoder().encode(node)))
posts=pd.DataFrame(posts_list)
print("Posts--- %s seconds ---" % (time.time() - start_time_posts))
message="Posts--- %s seconds ---" % (time.time() - start_time_posts)
logger(message)
start_time_users = time.time()
col = db.users
cursor = col.find()
users = list(cursor)
print(time.time()-start_time_users)
users_list=[]
for node in users:
users_list.append(json.loads(JSONEncoder().encode(node)))
    users = pd.DataFrame(users_list)
import numpy as np
import pandas as pd
import pyapprox as pya
from scipy.stats import uniform
import json
import os
import time
from pyapprox.random_variable_algebra import product_of_independent_random_variables_pdf
from basic.boots_pya import fun
from basic.partial_rank import partial_rank
from basic.read_data import read_specify, file_settings
from basic.utils import sa_df_format
# # import the original parameter sets
def pya_boot_sensitivity(outpath, nboot, seed, product_uniform, filename):
variable, _ = read_specify('parameter', 'reduced', product_uniform, num_vars=11)
len_params = variable.num_vars()
samples, values = read_specify('model', 'reduced', product_uniform, num_vars=11)
# Adaptively increase the size of training dataset and conduct the bootstrap based partial ranking
n_strat, n_end, n_step = [104, 552, 13]
errors_cv_all = {}
partial_results = {}
total_effects_all = {}
approx_list_all = {}
for i in range(n_strat, n_end+1, n_step):
# for i in n_list:
if (n_end - i) < n_step:
i = n_end
np.random.seed(seed)
errors_cv, _, total_effects, approx_list = fun(
variable, samples[:, :i], values[:i], product_uniform, nboot=nboot)
# partial ranking
total_effects = np.array(total_effects)
sa_shape = list(total_effects.shape)[0:2]
total_effects = total_effects.reshape((sa_shape))
rankings = partial_rank(total_effects,len_params, conf_level=0.95)
partial_results[f'nsample_{i}'] = rankings
errors_cv_all[f'nsample_{i}'] = errors_cv
total_effects_all[f'nsample_{i}'] = total_effects
approx_list_all[f'nsample_{i}'] = approx_list
# End for
np.savez(f'{outpath}{filename}',errors_cv=errors_cv_all, sensitivity_indices=partial_results, total_effects=total_effects_all)
import pickle
pickle.dump(approx_list_all, open(f'{outpath}{filename[:-4]}-approx-list.pkl', "wb"))
# END pya_boot_sensitivity()
def run_pya(outpath, nboot, seed, product_uniform):
if product_uniform == 'beta':
dist_type = 'beta'
elif product_uniform == 'exact':
dist_type = 'exact'
else:
dist_type = 'uniform'
filename = f'adaptive-reduce-{dist_type}_552.npz'
print(f'{outpath}{filename}')
if not os.path.exists(f'{outpath}{filename}'):
pya_boot_sensitivity(
outpath, nboot, seed, product_uniform, filename)
fileread = np.load(f'{outpath}{filename}', allow_pickle=True)
errors_cv = fileread[fileread.files[0]][()]
sensitivity_indices = fileread[fileread.files[1]][()]
    # Look at how the cross-validation error changes as the sample size increases
    errors_cv = pd.DataFrame.from_dict(errors_cv)
import pandas as pd
import pytest
import numpy as np
import dask.dataframe as dd
from dask.dataframe.utils import assert_eq, PANDAS_VERSION
N = 40
df = pd.DataFrame(
{
"a": np.random.randn(N).cumsum(),
"b": np.random.randint(100, size=(N,)),
"c": np.random.randint(100, size=(N,)),
"d": np.random.randint(100, size=(N,)),
"e": np.random.randint(100, size=(N,)),
}
)
ddf = dd.from_pandas(df, 3)
idx = (
pd.date_range("2016-01-01", freq="3s", periods=100)
| pd.date_range("2016-01-01", freq="5s", periods=100)
)[:N]
ts = pd.DataFrame(
{
"a": np.random.randn(N).cumsum(),
"b": np.random.randint(100, size=(N,)),
"c": np.random.randint(100, size=(N,)),
"d": np.random.randint(100, size=(N,)),
"e": np.random.randint(100, size=(N,)),
},
index=idx,
)
dts = dd.from_pandas(ts, 3)
def shifted_sum(df, before, after, c=0):
a = df.shift(before)
b = df.shift(-after)
return df + a + b + c
def ts_shifted_sum(df, before, after, c=0):
a = df.shift(before.seconds)
b = df.shift(-after.seconds)
return df + a + b + c
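# For example, shifted_sum(df, 1, 2) evaluates row i to df[i] + df[i-1] + df[i+2]
# (NaN near the edges); map_overlap must reproduce exactly this across partition
# boundaries, which is what the tests below check.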
@pytest.mark.parametrize("npartitions", [1, 4])
def test_map_overlap(npartitions):
ddf = dd.from_pandas(df, npartitions)
for before, after in [(0, 3), (3, 0), (3, 3), (0, 0)]:
# DataFrame
res = ddf.map_overlap(shifted_sum, before, after, before, after, c=2)
sol = shifted_sum(df, before, after, c=2)
assert_eq(res, sol)
# Series
res = ddf.b.map_overlap(shifted_sum, before, after, before, after, c=2)
sol = shifted_sum(df.b, before, after, c=2)
assert_eq(res, sol)
def test_map_overlap_names():
npartitions = 3
ddf = dd.from_pandas(df, npartitions)
res = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)
res2 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=2)
assert set(res.dask) == set(res2.dask)
res3 = ddf.map_overlap(shifted_sum, 0, 3, 0, 3, c=3)
assert res3._name != res._name
# Difference is just the final map
diff = set(res3.dask).difference(res.dask)
assert len(diff) == npartitions
res4 = ddf.map_overlap(shifted_sum, 3, 0, 0, 3, c=2)
assert res4._name != res._name
def test_map_overlap_errors():
# Non-integer
with pytest.raises(ValueError):
ddf.map_overlap(shifted_sum, 0.5, 3, 0, 2, c=2)
# Negative
with pytest.raises(ValueError):
ddf.map_overlap(shifted_sum, 0, -5, 0, 2, c=2)
# Partition size < window size
with pytest.raises(NotImplementedError):
ddf.map_overlap(shifted_sum, 0, 100, 0, 100, c=2).compute()
# Offset with non-datetime
with pytest.raises(TypeError):
ddf.map_overlap(shifted_sum, pd.Timedelta("1s"), pd.Timedelta("1s"), 0, 2, c=2)
def test_map_overlap_provide_meta():
df = pd.DataFrame(
{"x": [1, 2, 4, 7, 11], "y": [1.0, 2.0, 3.0, 4.0, 5.0]}
).rename_axis("myindex")
ddf = dd.from_pandas(df, npartitions=2)
# Provide meta spec, but not full metadata
res = ddf.map_overlap(
lambda df: df.rolling(2).sum(), 2, 0, meta={"x": "i8", "y": "i8"}
)
sol = df.rolling(2).sum()
assert_eq(res, sol)
def mad(x):
return np.fabs(x - x.mean()).mean()
rolling_method_args_check_less_precise = [
("count", (), False),
("sum", (), False),
("mean", (), False),
("median", (), False),
("min", (), False),
("max", (), False),
("std", (), True),
("var", (), True),
("skew", (), True), # here and elsewhere, results for kurt and skew are
("kurt", (), True), # checked with check_less_precise=True so that we are
# only looking at 3ish decimal places for the equality check
# rather than 5ish. I have encountered a case where a test
# seems to have failed due to numerical problems with kurt.
# So far, I am only weakening the check for kurt and skew,
# as they involve third degree powers and higher
("quantile", (0.38,), False),
("apply", (mad,), False),
]
@pytest.mark.parametrize(
"method,args,check_less_precise", rolling_method_args_check_less_precise
)
@pytest.mark.parametrize("window", [1, 2, 4, 5])
@pytest.mark.parametrize("center", [True, False])
def test_rolling_methods(method, args, window, center, check_less_precise):
# DataFrame
prolling = df.rolling(window, center=center)
drolling = ddf.rolling(window, center=center)
if method == "apply":
kwargs = {"raw": False}
else:
kwargs = {}
assert_eq(
getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise,
)
# Series
prolling = df.a.rolling(window, center=center)
drolling = ddf.a.rolling(window, center=center)
assert_eq(
getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise,
)
if PANDAS_VERSION <= "0.25.0":
filter_panel_warning = pytest.mark.filterwarnings(
"ignore::DeprecationWarning:pandas[.*]"
)
else:
filter_panel_warning = lambda f: f
@filter_panel_warning
@pytest.mark.parametrize("window", [1, 2, 4, 5])
@pytest.mark.parametrize("center", [True, False])
def test_rolling_cov(window, center):
# DataFrame
prolling = df.drop("a", 1).rolling(window, center=center)
drolling = ddf.drop("a", 1).rolling(window, center=center)
assert_eq(prolling.cov(), drolling.cov())
# Series
prolling = df.b.rolling(window, center=center)
drolling = ddf.b.rolling(window, center=center)
assert_eq(prolling.cov(), drolling.cov())
def test_rolling_raises():
df = pd.DataFrame(
{"a": np.random.randn(25).cumsum(), "b": np.random.randint(100, size=(25,))}
)
ddf = dd.from_pandas(df, 3)
pytest.raises(ValueError, lambda: ddf.rolling(1.5))
pytest.raises(ValueError, lambda: ddf.rolling(-1))
pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=1.2))
pytest.raises(ValueError, lambda: ddf.rolling(3, min_periods=-2))
pytest.raises(ValueError, lambda: ddf.rolling(3, axis=10))
pytest.raises(ValueError, lambda: ddf.rolling(3, axis="coulombs"))
pytest.raises(NotImplementedError, lambda: ddf.rolling(100).mean().compute())
def test_rolling_names():
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.rolling(2).sum().dask) == sorted(a.rolling(2).sum().dask)
def test_rolling_axis():
df = pd.DataFrame(np.random.randn(20, 16))
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(df.rolling(3, axis=0).mean(), ddf.rolling(3, axis=0).mean())
assert_eq(df.rolling(3, axis=1).mean(), ddf.rolling(3, axis=1).mean())
assert_eq(
df.rolling(3, min_periods=1, axis=1).mean(),
ddf.rolling(3, min_periods=1, axis=1).mean(),
)
assert_eq(
df.rolling(3, axis="columns").mean(), ddf.rolling(3, axis="columns").mean()
)
assert_eq(df.rolling(3, axis="rows").mean(), ddf.rolling(3, axis="rows").mean())
s = df[3]
ds = ddf[3]
assert_eq(s.rolling(5, axis=0).std(), ds.rolling(5, axis=0).std())
def test_rolling_partition_size():
df = pd.DataFrame(np.random.randn(50, 2))
ddf = dd.from_pandas(df, npartitions=5)
for obj, dobj in [(df, ddf), (df[0], ddf[0])]:
assert_eq(obj.rolling(10).mean(), dobj.rolling(10).mean())
assert_eq(obj.rolling(11).mean(), dobj.rolling(11).mean())
with pytest.raises(NotImplementedError):
dobj.rolling(12).mean().compute()
def test_rolling_repr():
ddf = dd.from_pandas(pd.DataFrame([10] * 30), npartitions=3)
res = repr(ddf.rolling(4))
assert res == "Rolling [window=4,center=False,axis=0]"
def test_time_rolling_repr():
res = repr(dts.rolling("4s"))
assert res == "Rolling [window=4000000000,center=False,win_type=freq,axis=0]"
def test_time_rolling_constructor():
result = dts.rolling("4s")
assert result.window == "4s"
assert result.min_periods is None
assert result.win_type is None
assert result._win_type == "freq"
assert result._window == 4000000000 # ns
assert result._min_periods == 1
@pytest.mark.parametrize(
"method,args,check_less_precise", rolling_method_args_check_less_precise
)
@pytest.mark.parametrize("window", ["1S", "2S", "3S", pd.offsets.Second(5)])
def test_time_rolling_methods(method, args, window, check_less_precise):
# DataFrame
if method == "apply":
kwargs = {"raw": False}
else:
kwargs = {}
prolling = ts.rolling(window)
drolling = dts.rolling(window)
assert_eq(
getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise,
)
# Series
prolling = ts.a.rolling(window)
drolling = dts.a.rolling(window)
assert_eq(
getattr(prolling, method)(*args, **kwargs),
getattr(drolling, method)(*args, **kwargs),
check_less_precise=check_less_precise,
)
@filter_panel_warning
@pytest.mark.parametrize("window", ["1S", "2S", "3S", pd.offsets.Second(5)])
def test_time_rolling_cov(window):
# DataFrame
prolling = ts.drop("a", 1).rolling(window)
drolling = dts.drop("a", 1).rolling(window)
assert_eq(prolling.cov(), drolling.cov())
# Series
prolling = ts.b.rolling(window)
drolling = dts.b.rolling(window)
assert_eq(prolling.cov(), drolling.cov())
@pytest.mark.parametrize(
"window,N",
[("1s", 10), ("2s", 10), ("10s", 10), ("10h", 10), ("10s", 100), ("10h", 100)],
)
def test_time_rolling_large_window_fixed_chunks(window, N):
df = pd.DataFrame(
{
"a": pd.date_range("2016-01-01 00:00:00", periods=N, freq="1s"),
"b": np.random.randint(100, size=(N,)),
}
)
df = df.set_index("a")
ddf = dd.from_pandas(df, 5)
assert_eq(ddf.rolling(window).sum(), df.rolling(window).sum())
assert_eq(ddf.rolling(window).count(), df.rolling(window).count())
assert_eq(ddf.rolling(window).mean(), df.rolling(window).mean())
@pytest.mark.parametrize("window", ["2s", "5s", "20s", "10h"])
def test_time_rolling_large_window_variable_chunks(window):
df = pd.DataFrame(
{
"a": pd.date_range("2016-01-01 00:00:00", periods=100, freq="1s"),
"b": np.random.randint(100, size=(100,)),
}
)
ddf = dd.from_pandas(df, 5)
ddf = ddf.repartition(divisions=[0, 5, 20, 28, 33, 54, 79, 80, 82, 99])
df = df.set_index("a")
ddf = ddf.set_index("a")
assert_eq(ddf.rolling(window).sum(), df.rolling(window).sum())
assert_eq(ddf.rolling(window).count(), df.rolling(window).count())
assert_eq(ddf.rolling(window).mean(), df.rolling(window).mean())
@pytest.mark.parametrize("before, after", [("6s", "6s"), ("2s", "2s"), ("6s", "2s")])
def test_time_rolling(before, after):
window = before
before = pd.Timedelta(before)
    after = pd.Timedelta(after)
from tools.crawler import SecCrawler
import re
import requests
import os
import sys
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
# add config.py file which contains https://www.worldtradingdata.com/ API key
from config.config import WTD_api_key, simfin_api_key
from ipdb import set_trace
def find_and_save_10K_to_folder(ticker, from_date=None, number_of_documents=40, doc_type='xbrl'):
if from_date is None:
from_date = datetime.today().strftime('%Y%m%d')
crawler = SecCrawler()
cik, company_name = get_cik_and_name_from_ticker(ticker)
crawler.filing_10K(ticker, cik, company_name, from_date,
number_of_documents, doc_type)
def find_and_save_10Q_to_folder(ticker, from_date=None, number_of_documents=40, doc_type='xbrl'):
if from_date is None:
from_date = datetime.today().strftime('%Y%m%d')
crawler = SecCrawler()
cik, company_name = get_cik_and_name_from_ticker(ticker)
crawler.filing_10Q(ticker, cik, company_name, from_date,
number_of_documents, doc_type)
def find_and_save_20F_to_folder(ticker, from_date=None, number_of_documents=40, doc_type='xbrl'):
if from_date is None:
from_date = datetime.today().strftime('%Y%m%d')
crawler = SecCrawler()
cik, company_name = get_cik_and_name_from_ticker(ticker)
crawler.filing_20F(ticker, cik, company_name, from_date,
number_of_documents, doc_type)
def get_cik_and_name_from_ticker(ticker):
URL = 'http://www.sec.gov/cgi-bin/browse-edgar?CIK=%s&Find=Search&owner=exclude&action=getcompany' % ticker
data = requests.get(URL).content.decode('utf-8')
CIK_RE = re.compile(r'.*CIK=(\d{10}).*')
cik_find = CIK_RE.findall(data)
if type(cik_find) == str:
pass
elif type(cik_find) == list:
cik_find = str(cik_find[0])
else:
print('could not find cik number...')
cik_find = None
name_RE = re.compile(r'companyName">(.+?)<')
name_find = name_RE.findall(data)
if type(name_find) == str:
pass
elif type(name_find) == list:
name_find = str(name_find[0])
else:
print('could not find company name...')
name_find = None
return cik_find, name_find
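# Hypothetical usage sketch (ticker and outputs are assumptions; the call performs
# a live request against SEC EDGAR):
# cik, company_name = get_cik_and_name_from_ticker('AAPL')
# find_and_save_10K_to_folder('AAPL', number_of_documents=5, doc_type='xbrl')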
def get_reports_list(ticker, report_type='10-K', file_type='xbrl', data_folder='.\\SEC-Edgar-Data\\'):
report_type += '\\'
path = os.path.join(data_folder, ticker, report_type, file_type)
if not os.path.isdir(path):
print(f'could not find {ticker} folder')
sys.exit()
if file_type == 'xbrl':
files = [os.path.join(path, f) for f in os.listdir(
path) if re.match(r'.*[0-9]+.xml', f) or re.match(r'.*htm.xml', f)]
elif file_type == 'txt':
files = [os.path.join(path, f) for f in os.listdir(
path) if re.match(r'.*[0-9]+.txt', f)]
return files
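# Illustrative note (not part of the original module): with the defaults above,
# downloaded filings are expected under a layout such as
#   .\SEC-Edgar-Data\<ticker>\10-K\xbrl\<report>.xml
# where <ticker> and <report> are placeholders, not real paths.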
def get_historical_stock_price(ticker, years=10, api='simfin'):
    '''
    Fetch historical stock prices via World Trading Data or SimFin, selected by
    `api`; the corresponding API key must be set in the config.py file.
    '''
start_date = (datetime.now() - timedelta(days=years*365)
).strftime('%Y-%m-%d')
if api == 'WTD':
request_url = 'https://www.worldtradingdata.com/api/v1/history?symbol=%s&sort=newest&api_token=%s&date_from=%s' % (
ticker, WTD_api_key, start_date)
content = requests.get(request_url)
data = content.json()
df = pd.DataFrame.from_dict(data['history'], orient='index')
df.index = pd.to_datetime(df.index)
df = df.apply(pd.to_numeric, errors='coerce')
elif api == "simfin":
request_url = "https://simfin.com/api/v1/info/find-id/ticker/%s?api-key=%s" % (
ticker, simfin_api_key)
content = requests.get(request_url)
data = content.json()
if "error" in data or len(data) < 1:
return None
else:
sim_id = data[0]['simId']
request_url = "https://simfin.com/api/v1/companies/id/%s/shares/prices?api-key=%s&start=%s" % (
sim_id, simfin_api_key, start_date)
content = requests.get(request_url)
data = content.json()
df = pd.DataFrame(data['priceData'])
df.rename(columns={'closeAdj': 'close'}, inplace=True)
df.set_index('date', inplace=True)
df.index = pd.to_datetime(df.index)
df = df.apply(pd.to_numeric, errors='coerce')
df = df.reindex(index=df.index[::-1])
return df
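# Hypothetical usage sketch (assumes a valid SimFin API key in config.py):
# prices = get_historical_stock_price('AAPL', years=5, api='simfin')
# if prices is not None:
#     print(prices['close'].tail())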
def estimate_stock_split_adjustments(stock_count):
    '''
    gets a series of share counts, estimates whether there were major stock splits,
    and returns an adjusted stock_count
    '''
multiplier = 1
counts = stock_count.values
    adjusted_count = pd.Series(0, index=stock_count.index)
import logging
import os
import re
import openpyxl
import pandas as pd
from Configs import getConfig
config = getConfig()
log = logging.getLogger(__name__)
log.addHandler(logging.StreamHandler())
log.setLevel(getattr(logging, config.LOG_LEVEL))
def parse_targets(ttd_target_download_file):
pattern = re.compile(r"^(T\d{5})\t(.*?)\t(.*)$")
target_main_keys = ['Name', 'UniProt ID', 'Type of Target',
'EC Number', 'BioChemical Class', 'Function', 'Target Validation']
drug_modes_of_action = ['Modulator', 'Inhibitor', 'Agonist', 'Antagonist', 'Binder', 'Activator', 'Stimulator', 'Cofactor', 'Modulator (allosteric modulator)',
'Blocker', 'Blocker (channel blocker)', 'Inducer', 'Inhibitor (gating inhibitor)', 'Suppressor', 'Regulator (upregulator)',
'Breaker', 'Immunomodulator', 'Regulator', 'Opener', 'Stabilizer', 'Enhancer', 'Binder (minor groove binder)', 'Intercalator',
'Immunomodulator (Immunostimulant)', 'Stablizer'
]
prev_target_id = None
targets = dict()
with open(ttd_target_download_file, 'rt') as in_file:
for linenum, line in enumerate(in_file):
match = pattern.search(line)
if match != None:
target_id = match.group(1)
key = match.group(2)
value = match.group(3)
#value = value.split("\t")
if prev_target_id != target_id:
if prev_target_id in targets.keys():
print("Error: Target {0} already added".format(prev_target_id))
elif prev_target_id != None:
targets[prev_target_id] = target
# initialize new target dict
target = {'ID': target_id}
# create drug key to store mode of action
if key == "DRUGINFO":
(ttd_drug_id, drug_name, drug_status) = value.split("\t")
if "DRUGINFO" not in target:
target["DRUGINFO"] = dict()
target["DRUGINFO"][ttd_drug_id] = [drug_name, drug_status]
# check if current key is a drug mode of action
elif key in drug_modes_of_action:
# set drug mode of action
drug_key = [k for k, v in target["Drugs"].items() if k.lower() == value.lower()]
if len(drug_key) == 1:
target["Drugs"][drug_key[0]] = key
target["Drug"].append(drug_key[0])
target["DrugMethod"].append(key)
target["DrugStatus"].append()
else:
print("Error: Drug {0} not found in target.".format(value))
elif key in target:
if type(target[key]) != list:
target[key] = [target[key]]
if value not in target[key]:
target[key].append(value)
else:
target[key] = value
prev_target_id = target_id
targets[target_id] = target
return targets
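def _example_parse_targets_line():
    # Illustrative sketch (the record content is an assumption, not real TTD data):
    # each raw line is tab-separated as "<target id>\t<key>\t<value>" and must
    # match the ^(T\d{5})\t(.*?)\t(.*)$ pattern used above.
    line = "T47101\tTARGNAME\tExample kinase"
    match = re.compile(r"^(T\d{5})\t(.*?)\t(.*)$").search(line)
    return match.group(1), match.group(2), match.group(3)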
def parse_drug(ttd_drug_download_file):
key_map = {
"TRADNAME": "trade_name",
"DRUGCOMP": "company",
"THERCLAS": "therapeutic_class",
"DRUGTYPE": "drug_type",
"DRUGINCH": "inchi",
"DRUGINKE": "inchikey",
"DRUGSMIL": "drug_smiles",
"HIGHSTAT": "highest_stat",
"DRUGCLAS": "drug_class",
"DRUADIID": "drug_adi_id",
"COMPCLAS": "compound_class"
}
pattern = re.compile(r"^(D.*)\t(.*?)\t(.*)$")
prev_drug_id = None
drugs = {}
drug = {}
with open(ttd_drug_download_file, 'rt') as in_file:
for linenum, line in enumerate(in_file):
line = line.strip()
match = pattern.search(line)
if match != None:
drug_id = match.group(1)
key = match.group(2)
value = match.group(3)
#print("Drug id = {0}, key = {1}, value = {2}".format(drug_id, key, value))
if prev_drug_id != drug_id:
if prev_drug_id in drugs.keys():
print("Error: drug {} already added".format(prev_drug_id))
else:
drugs[prev_drug_id] = drug
drug = {'ttd_id': drug_id}
if key != 'DRUG__ID':
drug[key_map[key]] = value
prev_drug_id = drug_id
drugs[drug_id] = drug
return drugs
def post_process_value(key, value):
if key == "cas_number":
return value.replace("CAS ", "")
else:
return value
def parse_drug_xref(ttd_drug_xref_file):
key_map = {
"DRUGNAME": "name",
"CASNUMBE": "cas_number",
"D_FOMULA": "drug_formula",
"PUBCHCID": "pubchem_cid",
"PUBCHSID": "pubchem_sid",
"CHEBI_ID": "chebi_id",
"SUPDRATC": "superdrug_atc",
"SUPDRCAS": "superdrug_cas"
}
pattern = re.compile(r"^(D.*)\t(.*?)\t(.*)$")
prev_drug_id = None
drug_xrefs = {}
drug = {}
with open(ttd_drug_xref_file, 'rt') as in_file:
for linenum, line in enumerate(in_file):
line = line.strip()
match = pattern.search(line)
if match != None:
drug_id = match.group(1)
key = match.group(2)
value = match.group(3)
#print("Drug id = {0}, key = {1}, value = {2}".format(drug_id, key, value))
if prev_drug_id != drug_id:
if prev_drug_id in drug_xrefs.keys():
print("Error: drug {} already added".format(prev_drug_id))
else:
drug_xrefs[prev_drug_id] = drug
drug = {'ttd_id': drug_id}
if key != 'TTDDRUID':
mapped_key = key_map[key]
drug[mapped_key] = post_process_value(mapped_key, value)
prev_drug_id = drug_id
drug_xrefs[drug_id] = drug
return drug_xrefs
def parse_drug_targets(drug_target_map_download_file):
wb_obj = openpyxl.load_workbook(drug_target_map_download_file)
sheet = wb_obj.active
drug_targets = []
for i, row in enumerate(sheet.iter_rows(values_only=True)):
if i == 0:
continue
drug_target = {}
drug_target['target_id'] = row[0]
drug_target['drug_id'] = row[2]
drug_target['moa'] = row[5]
drug_target['activity'] = row[6]
drug_target['reference'] = row[7]
if drug_target['moa'] == ".":
drug_target['moa'] = ""
drug_targets.append(drug_target)
return drug_targets
def load_ttd_compound_file(drug_download_file, drug_xref_file, drug_output_file):
drug = parse_drug(drug_download_file)
drug_xref = parse_drug_xref(drug_xref_file)
drug_df = pd.DataFrame.from_dict(drug, orient='index')
drug_xref_df = pd.DataFrame.from_dict(drug_xref, orient='index')
# drugbank columns
# columns = ['ttd_id', 'name', 'type', 'groups', 'cas_number', 'atc_codes', 'categories', 'inchikey', 'inchi', 'description', 'indication', 'mechanism',
# 'chebi_id', 'pubchem_id', 'kegg_id', 'kegg_drug_id', 'chemspider_id', 'license', 'source']
drug_merged_df = drug_df.join(drug_xref_df, on="ttd_id", rsuffix="_xref")
drug_merged_df['source_id'] = drug_merged_df['ttd_id']
drug_merged_df['source'] = 'TTD'
drug_merged_df['source_url'] = "http://db.idrblab.net/ttd/data/drug/details/" + drug_merged_df["ttd_id"]
drug_merged_df['license'] = 'CC 1.0'
drug_merged_df.to_csv(drug_output_file, sep='\t', index=False)
return drug_merged_df
def load_target_file(target_download_file, target_output_file):
targets = parse_targets(target_download_file)
columns = ["ID", "TARGNAME", "UNIPROID", "TARGTYPE", "BIOCLASS",
"ECNUMBER", "FUNCTION", "GENENAME", "SEQUENCE", "SYNONYMS"]
    targets_df = pd.DataFrame.from_dict(targets, orient='index')
import os
import json
import hashlib
import logging
import datetime
import contextlib
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy import select, column, text, desc
from sqlalchemy.exc import OperationalError
from koapy import KiwoomOpenApiContext
from koapy.backend.cybos.CybosPlusComObject import CybosPlusComObject
from koapy.config import config
from koapy.utils.krx.calendar import get_last_krx_close_datetime
from send2trash import send2trash
# TODO: When appending data fetched as adjusted prices, the historical data needs an additional separate correction
# TODO: Consider adding InfluxDB support
class ChartType:
DAY = 1
WEEK = 2
MONTH = 3
MINUTE = 4
TICK = 5
DAY_STRINGS = ['day', 'daily', 'date', 'd', 'D']
WEEK_STRINGS = ['week', 'weekly', 'w', 'W']
MONTH_STRINGS = ['month', 'monthly', 'mon', 'M']
MINUTE_STRINGS = ['minute', 'min', 'm']
TICK_STRINGS = ['tick', 't', 'T']
FROM_STRING_DICT = {}
TO_STRING_DICT = {}
for enum, strings in zip(
[DAY, WEEK, MONTH, MINUTE, TICK],
[DAY_STRINGS, WEEK_STRINGS, MONTH_STRINGS, MINUTE_STRINGS, TICK_STRINGS]):
TO_STRING_DICT[enum] = strings[0]
for string in strings:
FROM_STRING_DICT[string] = enum
@classmethod
def from_string(cls, value):
if value not in cls.FROM_STRING_DICT:
raise ValueError('unsupported chart type %s' % value)
return cls.FROM_STRING_DICT[value]
@classmethod
def to_string(cls, value):
if value not in cls.TO_STRING_DICT:
raise ValueError('unsupported chart type %s' % value)
return cls.TO_STRING_DICT[value]
class FileFormat:
XLSX = 1
SQLITE = 2
XLSX_STRINGS = ['xlsx', 'excel']
SQLITE_STRINGS = ['sqlite3', 'sqlite']
FROM_STRING_DICT = {}
TO_STRING_DICT = {}
for enum, strings in zip(
[XLSX, SQLITE],
[XLSX_STRINGS, SQLITE_STRINGS]):
TO_STRING_DICT[enum] = strings[0]
for string in strings:
FROM_STRING_DICT[string] = enum
@classmethod
def from_string(cls, value):
if value not in cls.FROM_STRING_DICT:
raise ValueError('unsupported file format %s' % value)
return cls.FROM_STRING_DICT[value]
@classmethod
def to_string(cls, value):
if value not in cls.TO_STRING_DICT:
raise ValueError('unsupported file format %s' % value)
return cls.TO_STRING_DICT[value]
class IfExists:
"""
if_exists:
- force = always overwrite
- auto = append if exists, create if not exists, overwrite if invalid
- append = append if exists, create if not exists, ignore if invalid
- ignore = ignore if exists, create if not exists
"""
AUTO = 1
APPEND = 2
IGNORE = 3
FORCE = 4
AUTO_STRINGS = ['auto']
APPEND_STRINGS = ['append']
IGNORE_STRINGS = ['ignore']
FORCE_STRINGS = ['force']
FROM_STRING_DICT = {}
TO_STRING_DICT = {}
for enum, strings in zip(
[AUTO, APPEND, IGNORE, FORCE],
[AUTO_STRINGS, APPEND_STRINGS, IGNORE_STRINGS, FORCE_STRINGS]):
TO_STRING_DICT[enum] = strings[0]
for string in strings:
FROM_STRING_DICT[string] = enum
@classmethod
def from_string(cls, value):
if value not in cls.FROM_STRING_DICT:
raise ValueError('unsupported if exists %s' % value)
return cls.FROM_STRING_DICT[value]
@classmethod
def to_string(cls, value):
if value not in cls.TO_STRING_DICT:
raise ValueError('unsupported if exists %s' % value)
return cls.TO_STRING_DICT[value]
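# Minimal usage sketch of the enum-style helpers above (not part of the original
# module):
def _example_enum_helpers():
    assert ChartType.from_string('daily') == ChartType.DAY
    assert ChartType.to_string(ChartType.MINUTE) == 'minute'
    assert FileFormat.to_string(FileFormat.from_string('sqlite')) == 'sqlite3'
    assert IfExists.from_string('auto') == IfExists.AUTO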
class HistoricalStockPriceDataUpdater:
def __init__(self, codes, datadir, chart_type='daily', interval=1, file_format='xlsx',
if_exists='auto', delete_remainings=True, context=None):
self._codes = codes
self._datadir = datadir
self._chart_type = ChartType.from_string(chart_type)
self._interval = interval
self._format = FileFormat.from_string(file_format)
self._extension = '.' + FileFormat.to_string(self._format)
self._if_exists = IfExists.from_string(if_exists)
self._delete_remainings = delete_remainings
self._context = context
self._codes_len = len(self._codes)
failover_hash = hashlib.md5()
failover_hash_data = (self.__class__.__name__, sorted(self._codes), self._datadir)
failover_hash_data = json.dumps(failover_hash_data).encode()
failover_hash.update(failover_hash_data)
failover_hash = failover_hash.hexdigest()
self._failover_filename = 'koapy_failover_%s.txt' % failover_hash[:6]
self._date_column_name = '일자'
self._date_format = '%Y%m%d'
self._time_column_name = '시간'
self._time_format = '%H%M'
self._tablename = 'history'
def get_start_date(self):
return get_last_krx_close_datetime()
def check_failover_code(self):
failover_code = None
if os.path.exists(self._failover_filename):
with open(self._failover_filename) as f:
failover_code = f.read()
if not len(failover_code) > 0:
failover_code = None
return failover_code
def save_failover_code(self, code):
with open(self._failover_filename, 'w') as f:
f.write(code)
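    # Note (not part of the original class): the failover file stores the last
    # code that was being processed; presumably the update loop uses it to resume
    # an interrupted run instead of starting from the first code again.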
@contextlib.contextmanager
def get_or_create_context(self):
with contextlib.ExitStack() as stack:
if self._context is None:
default_context = config.get('koapy.data.updater.default_context')
if default_context == 'koapy.backend.cybos.CybosPlusComObject.CybosPlusComObject':
logging.debug('Using CybosPlus backend')
default_context = CybosPlusComObject()
else:
if default_context != 'koapy.context.KiwoomOpenApiContext.KiwoomOpenApiContext':
logging.warning('Unexpected default context %s, defaults to KiwoomOpenApiContext.', default_context)
logging.debug('Using Kiwoom OpenAPI backend')
default_context = KiwoomOpenApiContext()
self._context = stack.enter_context(default_context)
def unset_context():
self._context = None
stack.callback(unset_context)
else:
logging.debug('Using existing given context of type %s', type(self._context))
if isinstance(self._context, KiwoomOpenApiContext):
if self._chart_type == ChartType.DAY:
self._date_column_name = '일자'
self._date_format = '%Y%m%d'
elif self._chart_type == ChartType.MINUTE:
self._date_column_name = '체결시간'
self._date_format = '%Y%m%d%H%M%S'
else:
raise ValueError
elif isinstance(self._context, CybosPlusComObject):
if self._chart_type == ChartType.DAY:
self._date_column_name = '날짜'
self._date_format = '%Y%m%d'
elif self._chart_type == ChartType.MINUTE:
self._date_column_name = '날짜'
self._date_format = '%Y%m%d'
self._time_column_name = '시간'
self._time_format = '%H%M'
else:
raise ValueError
else:
raise TypeError
self._context.EnsureConnected()
yield self._context
def get_filepath_for_code(self, code):
filename = code + self._extension
filepath = os.path.join(self._datadir, filename)
return filepath
def remove_file(self, filepath):
return send2trash(filepath)
def _read_data_excel(self, filepath):
        return pd.read_excel(filepath, dtype=str)
import numpy as np
import pandas as pd
import pytest
from temporis.transformation.features.imputers import (ForwardFillImputer,
MeanImputer,
MedianImputer,
RemoveInf)
class TestImputers():
def test_PandasRemoveInf(self):
remover = RemoveInf()
df = pd.DataFrame({
'a': [0, np.inf, -np.inf, 0.1, 0.9, 15, 0.5, 0.3, 0.5],
'b': [5, 6, 7, 5, np.inf, 5, 6, 5, 45],
})
assert not pd.isnull(df['a'][1])
assert not pd.isnull(df['a'][2])
assert not pd.isnull(df['b'][4])
df_new = remover.fit_transform(df)
        assert pd.isnull(df_new['a'][1])
import pandas as pd
from bs4 import BeautifulSoup
import requests
# For player ranks
url = 'http://www.cricmetric.com/ipl/ranks/'
pd.read_html(requests.get(url).content)[-1].to_csv("./Dataset/_player_rank.csv", index=False, header=None)
# Store the sum of EF score by team.
data = pd.read_csv('./Dataset/_player_rank.csv')
team_rank = pd.DataFrame(
data.groupby('Team')['EFscore'].agg(['sum']).reset_index().sort_values('sum', ascending=False)).to_csv(
'./Dataset/_team_rank.csv', index=False)
# For news Headlines
url = 'https://sports.ndtv.com/indian-premier-league-2018/news'
page_response = requests.get(url)
page_content = BeautifulSoup(page_response.content, "html.parser")
pd.DataFrame(page_content.find_all(class_='menutitle')).to_csv("./Dataset/_news.txt", index=False, header=None)
clean_df = pd.read_csv('./Dataset/_news.txt', header=None).replace('<[^>]+>', '', regex=True)
pd.DataFrame(clean_df)
# pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
# pylint: disable=global-statement
import pandas as pd
import param
import pytest
from bokeh.models.sources import ColumnDataSource
from awesome_panel_extensions.widgets.dataframe_base import (
DataFrameWithStreamAndPatchBaseWidget as DFWidget,
)
VALUE_CHANGED_COUNT = 0
# region value
@pytest.fixture
def data():
return {"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"], "z": [True, False, True, False]}
@pytest.fixture
def dataframe(data):
return pd.DataFrame(data)
def test_constructor(dataframe):
# When
component = DFWidget(value=dataframe)
# Then
assert component.value is dataframe
assert isinstance(component._source, ColumnDataSource)
pd.testing.assert_frame_equal(component._source.to_df(), dataframe.reset_index())
def test_constructor_no_value():
# When
component = DFWidget()
# Then
assert isinstance(component._source, ColumnDataSource)
def test_change_value(dataframe):
# Given
component = DFWidget()
# When
component.value = dataframe
# Then
pd.testing.assert_frame_equal(component._source.to_df(), dataframe.reset_index())
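# Note (not part of the original tests): the widget mirrors `value` into a Bokeh
# ColumnDataSource, which is why the assertions above compare `_source.to_df()`
# against the reset-index DataFrame.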
# endregion value
# region stream
def test_stream_dataframe_dataframe_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
stream_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(tabulator_source_df, expected)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_series_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
stream_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]}).loc[1]
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 4], "y": ["a", "b", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_dictionary_value_multi():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
stream_value = {"x": [3, 4], "y": ["c", "d"]}
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When PROVIDING A DICTIONARY OF COLUMNS
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_dictionary_value_single():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
stream_value = {"x": 4, "y": "d"}
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When PROVIDING A DICTIONARY ROW
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 4], "y": ["a", "b", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
# endregion Stream
# region Patch
def test_patch_dataframe_dataframe_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = DFWidget(value=value)
patch_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*_):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.patch(patch_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(tabulator_source_df, expected)
assert VALUE_CHANGED_COUNT == 1
# endregion Patch
def test_patch_from_partial_dataframe():
    data = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
import geopandas as gp
import numpy as np
import pandas as pd
import pytest
import shapely
import xarray as xr
from regionmask import Regions, from_geopandas, mask_3D_geopandas, mask_geopandas
from regionmask.core._geopandas import (
_check_duplicates,
_construct_abbrevs,
_enumerate_duplicates,
)
from .utils import dummy_ds, dummy_region, expected_mask_2D, expected_mask_3D
@pytest.fixture
def geodataframe_clean():
numbers = [0, 1, 2]
names = ["Unit Square1", "Unit Square2", "Unit Square3"]
abbrevs = ["uSq1", "uSq2", "uSq3"]
d = dict(
names=names, abbrevs=abbrevs, numbers=numbers, geometry=dummy_region.polygons
)
return gp.GeoDataFrame.from_dict(d)
@pytest.fixture
def geodataframe_missing():
numbers = [1, None, None]
names = ["Unit Square1", None, None]
abbrevs = ["uSq1", None, None]
d = dict(
names=names, abbrevs=abbrevs, numbers=numbers, geometry=dummy_region.polygons
)
return gp.GeoDataFrame.from_dict(d)
@pytest.fixture
def geodataframe_duplicates():
numbers = [1, 1, 1]
names = ["Unit Square", "Unit Square", "Unit Square"]
abbrevs = ["uSq", "uSq", "uSq"]
d = dict(
names=names, abbrevs=abbrevs, numbers=numbers, geometry=dummy_region.polygons
)
return gp.GeoDataFrame.from_dict(d)
def test_from_geopandas_wrong_input():
with pytest.raises(
TypeError, match="`geodataframe` must be a geopandas 'GeoDataFrame'"
):
from_geopandas(None)
def test_from_geopandas_use_columns(geodataframe_clean):
result = from_geopandas(
geodataframe_clean,
numbers="numbers",
names="names",
abbrevs="abbrevs",
name="name",
source="source",
)
assert isinstance(result, Regions)
assert result.polygons[0].equals(dummy_region.polygons[0])
assert result.polygons[1].equals(dummy_region.polygons[1])
assert result.polygons[2].equals(dummy_region.polygons[2])
assert result.numbers == [0, 1, 2]
assert result.names == ["Unit Square1", "Unit Square2", "Unit Square3"]
assert result.abbrevs == ["uSq1", "uSq2", "uSq3"]
assert result.name == "name"
assert result.source == "source"
def test_from_geopandas_default(geodataframe_clean):
result = from_geopandas(geodataframe_clean)
assert isinstance(result, Regions)
assert result.polygons[0].equals(dummy_region.polygons[0])
assert result.polygons[1].equals(dummy_region.polygons[1])
assert result.polygons[2].equals(dummy_region.polygons[2])
assert result.numbers == [0, 1, 2]
assert result.names == ["Region0", "Region1", "Region2"]
assert result.abbrevs == ["r0", "r1", "r2"]
assert result.name == "unnamed"
assert result.source is None
@pytest.mark.parametrize("arg", ["names", "abbrevs", "numbers"])
def test_from_geopandas_missing_error(geodataframe_missing, arg):
with pytest.raises(ValueError, match=f"{arg} cannot contain missing values"):
from_geopandas(geodataframe_missing, **{arg: arg})
@pytest.mark.parametrize("arg", ["names", "abbrevs", "numbers"])
def test_from_geopandas_duplicates_error(geodataframe_duplicates, arg):
with pytest.raises(ValueError, match=f"{arg} cannot contain duplicate values"):
from_geopandas(geodataframe_duplicates, **{arg: arg})
@pytest.mark.parametrize("arg", ["names", "abbrevs", "numbers"])
def test_from_geopandas_column_missing(geodataframe_clean, arg):
with pytest.raises(KeyError):
from_geopandas(geodataframe_clean, **{arg: "not_a_column"})
series_duplicates = pd.Series([1, 1, 2, 3, 4])
"""Module to perform QC on the xiRT performance."""
import glob
import logging
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import palettable
import pandas as pd
import seaborn as sns
import statannot
from matplotlib import ticker
from matplotlib.lines import Line2D
from scipy.stats import pearsonr
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, classification_report
sns.set(context="notebook", style="white", palette="deep", font_scale=1)
# constants
hSAX_color = "#9b59b6"
SCX_color = "#3498db"
RP_color = "#e74c3c"
colors = [hSAX_color, SCX_color, RP_color, "C1", "C2", "C7"]
colormaps = [plt.cm.Purples, plt.cm.Blues, plt.cm.Reds,
plt.cm.Oranges, plt.cm.Greens, plt.cm.Greys]
targetdecoys_cm = sns.xkcd_palette(["faded green", "orange", "dark orange"])
TOTAL_color = palettable.cartocolors.qualitative.Bold_6.mpl_colors[1:3][1]
new_rc_params = {'text.usetex': False, "svg.fonttype": 'none'}
mpl.rcParams.update(new_rc_params)
logger = logging.getLogger(__name__)
def encode_pval(pvalue):
"""
    Given a p-value, return its significance annotation in star notation.
According to: * <= 0.05, ** <= 0.01, *** <= 0.001, **** <= 0.0001
Args:
pvalue: float, p-value
Returns:
str, annotation
"""
    # thresholds and labels are aligned so that p <= 0.001 maps to "***",
    # p <= 0.05 maps to "*" and anything larger maps to "ns", as documented above
    reference_pval = np.array([0.0001, 0.001, 0.01, 0.05, 1])
    reference_str = ["****", "***", "**", "*", "ns"]
    return reference_str[np.where(reference_pval >= pvalue)[0][0]]
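# Illustrative examples (not part of the original module), following the mapping
# documented above: encode_pval(0.03) -> "*", encode_pval(0.0005) -> "***",
# encode_pval(0.2) -> "ns".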
def statistical_annotation(x1, x2, yvalues, txt, ax): # pragma: no cover
"""
Add annotation to axes plot.
Args:
x1:
x2:
yvalues:
txt: str, text to add
ax: axes object, matplotlib axes
Returns:
None
"""
# statistical annotation
y, h, col = yvalues.max() + 2, 2, 'k'
ax.plot([x1, x1, x2, x2], [y, y + h, y + h, y], lw=1.5, c=col)
ax.text((x1 + x2) * .5, y + h, txt, ha='center', va='bottom', color=col)
def add_heatmap(y, yhat, task, ax, colormap, dims): # pragma: no cover
"""Generate a heatmap to visualize classification results and return plot to given axes.
Args:
y: ar-like, observed
yhat: ar-like, predicted
task: ar-like, task names
ax: axes, matplotlib axes
colormap: colormap, plt.cm colormap instance
dims: ar-like, unique dimensions for the input data
Returns:
axes, matplotlib axes object
"""
# prepare confusion matrix
# note that yhat and y are change in the order here! This is needed because sklearn
# arranges the confusion matrix very confusing ... this way bottom left, to top right
# will equal a correaltion / straight line = expected behavior
cm_scx = pd.DataFrame(np.flip(confusion_matrix(yhat, y), axis=0))
cm_scx.columns = cm_scx.columns
cm_scx.index = np.flip(cm_scx.columns)
# mask for not writing zeroes into the plot
mask = cm_scx <= 0
cm_scx[mask] = np.nan
# annotation
metric_str = """r2: {:.2f} f1: {:.2f} acc: {:.2f} racc: {:.2f}""".format(
custom_r2(y, yhat), f1_score(y, yhat, average="macro"),
accuracy_score(y, yhat), relaxed_accuracy(y, yhat))
logger.info("QC: {}".format(task))
logger.info("Metrics: {}".format(metric_str))
ax = sns.heatmap(cm_scx, cmap=colormap, annot=True, annot_kws={"size": 12},
fmt='.0f', cbar=True, mask=mask, ax=ax)
ax.axhline(y=dims[-1], color='k')
ax.axvline(x=0, color='k')
ax.set(ylim=(cm_scx.shape[0], 0), xlabel="Observed {}\n".format(task),
title="""{}\n{}""".format(task, metric_str), ylabel="Predicted {}".format(task))
sns.despine()
return ax
def add_scatter(y, yhat, task, ax, color): # pragma: no cover
"""Generate a scatter plot to visualize prediction results and return plot to given axes.
Args:
y: ar-like, observed
yhat: ar-like, predicted
task: ar-like, task names
ax: axes, matplotlib axes
color: color, Either named color RGB for usage in matplotlib / seaborn.
Returns:
axes, matplotlib axes object
"""
# get min, max for plotting
xmin, xmax = np.hstack([y, yhat]).min(), np.hstack([y, yhat]).max()
xmin = xmin - 0.1 * xmin
xmax = xmax + 0.1 * xmax
metric_str = """r2: {:.2f} """.format(custom_r2(y, yhat))
logger.info("QC: {}".format(task))
logger.info("Metrics: {}".format(metric_str))
ax.scatter(y, yhat, facecolor="none", edgecolor=color)
ax.set(title=metric_str, xlabel="Observed {}".format(task.upper()),
ylabel="Predicted {}".format(task), xlim=(xmin, xmax), ylim=(xmin, xmax))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
sns.despine()
return ax
def save_fig(fig, path, outname): # pragma: no cover
"""Save a figure in png/svg/pdf format and 600 dpi.
Args:
fig: figure object, matplotlib figure object
path: str, path to store the result
Returns:
None
"""
# fig.savefig(os.path.join(path, outname + ".png"), dpi=600)
fig.savefig(os.path.join(path, outname + ".pdf"), dpi=600)
fig.savefig(os.path.join(path, outname + ".svg"), dpi=600)
def custom_r2(y, yhat):
"""Compute the r2 value.
Args:
y: ar-like, observed
yhat: ar-like, predicted
Returns:
float, r2 value
"""
return np.round(pearsonr(y, yhat)[0] ** 2, 2)
def relaxed_accuracy(y, yhat):
"""Compute the relaxed accuracy (within an error margin of 1).
Args:
y: ar-like, observed values
yhat: ar-like, predicted values
Returns:
Float, relaxed accuracy (error +- 1
"""
return np.round(sum(np.abs(y - yhat) <= 1) / len(yhat), 2)
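def _example_metrics():
    # Illustrative sketch (not part of the original module) of the two helpers
    # above: custom_r2 is the squared Pearson correlation, and relaxed_accuracy
    # counts predictions within +/- 1 of the observed value.
    y = np.array([1, 2, 3, 4])
    yhat = np.array([1, 3, 5, 4])
    return custom_r2(y, yhat), relaxed_accuracy(y, yhat)  # (0.69, 0.75)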
def plot_epoch_cv(callback_path, tasks, xirt_params, outpath, show=False): # pragma: no cover
"""Do a QC plot of the metrics and losses for all tasks across the epochs (over all cvs).
Args:
callback_path: str, path to the callback logs
tasks: ar-like, list of tasks that were used as identifier in xirt
xirt_params: dict, parsed yaml file for xirt config.
outpath: str, location to store the plots
show: bool, if True plot figure
Returns:
None
"""
# %% # read epoch log from callbacks
epochlogs = glob.glob(os.path.join(callback_path, "*epochlog*"))
    df = pd.concat([pd.read_csv(i) for i in epochlogs])
import pandas as pd
import numpy as np
from .utility_fxns import distribute
def generate_id_dict(id_list, prod_ids, df):
''' docstring for generate_id_dict
input: product id list
output: dictionary of
key: product id
values: [position of product id in full matrix
, number of skus
, sku product ids]'''
id_dict = {}
for i in prod_ids:
pos = id_list.index(i)
j = 1
sku_ids = []
flag = True
while flag:
step = pos + j
if (df.item_type[step] == 'Product') & (j == 1):
j = 0
flag = False
elif df.item_type[step] == 'Product':
j -= 1
flag = False
elif df.item_type[step] == 'SKU':
j += 1
sku_ids.append(df.product_id[step])
else:
# not a product or sku
j = 0
flag = False
id_dict[i] = [pos, j, sku_ids]
return id_dict
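def _example_generate_id_dict():
    # Illustrative sketch with made-up rows (not from the original dataset): a
    # 'Product' row followed by its 'SKU' rows, then the next 'Product'.
    df = pd.DataFrame({
        'item_type': ['Product', 'SKU', 'SKU', 'Product'],
        'product_id': ['P1', 'S1', 'S2', 'P2'],
    })
    id_list = list(df.product_id)
    # -> {'P1': [0, 2, ['S1', 'S2']]}: position 0, two SKUs, and their ids
    return generate_id_dict(id_list, ['P1'], df)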
def sku_combo_dicts_v2(file_list):
'''docstring for sku_combo_dicts'''
file = 'color_table.csv'
filename = [i for i in file_list if file in i][0]
df = pd.read_csv(filename)
cols = ['dg', 'uf', 'ff']
color_dicts = {}
for col in cols:
ndf = df.loc[df[col] == True]
color_dicts[col + '_color_dict'] = {
i: [j, k]
for (i, j, k) in zip(range(len(ndf)), ndf.color, ndf.new_code)
}
file = 'weight_table.csv'
filename = [i for i in file_list if file in i][0]
    df = pd.read_csv(filename)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by <NAME> for
Low runoff variability driven by a dominance of snowmelt inhibits clear coupling of climate, tectonics, and topography in the Greater Caucasus Mountains
If you use this code or derivatives, please cite the original paper.
"""
import pandas as pd
import numpy as np
from scipy import odr
import matplotlib.pyplot as plt
import stochastic_threshold as stim
######################
#### Read In Data ####
######################
df = pd.read_csv('data_tables/gc_ero_master_table.csv')
#!/usr/bin/env python
import requests
import numpy as np
import pandas as pd
import json
import pytz
def _GET(url):
req = requests.get(url)
data = req.json()
return data
def _get_data(url):
raw = _GET(url)
return json.dumps(raw['Data'])
def _get_dowjones_data(url):
raw = _GET(url)
map = raw['Time Series (Daily)']
matrix = [{"time": key,
"close": float(val['4. close']),
"open": float(val['1. open']),
"high": float(val['2. high']),
"low": float(val['3. low']),
"volumefrom": float(val['5. volume']),
"volumeto": float(val['5. volume'])} for key, val in map.items()]
return json.dumps(matrix)
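# Note (not part of the original module): the daily time-series JSON is keyed by
# date with numbered fields such as '1. open' and '4. close'; the loop above
# flattens it into a list of OHLCV dicts and re-serialises it for _get_prices.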
def _get_prices(data, prefix='usd_', prefixed_cols=['close', 'open', 'high', 'low', 'volumefrom', 'volumeto']):
    aux = pd.read_json(data, convert_dates=['time'])
import pandas
import dateutil
import datetime
import seaborn
import matplotlib
import matplotlib.pylab
import databaseAccess
import matplotlib.pyplot as plt
# py -c 'import visualiseData; visualiseData.getFastestTimes()'
def getFastestTimes():
splits = databaseAccess.getSplits()
activities = splits[['activity_date', 'distance', 'elapsed_time']]
produceFastest1k(activities)
months=[]
max_date = datetime.datetime.strptime((datetime.datetime.strptime(splits['activity_date'].max(),"%Y-%m-%dT%H:%M:%SZ")).strftime('%Y%m'),'%Y%m')
min_date = datetime.datetime.strptime((datetime.datetime.strptime(splits['activity_date'].min(),"%Y-%m-%dT%H:%M:%SZ")).strftime('%Y%m'),'%Y%m')
months.append(min_date)
while min_date <= max_date:
min_date = min_date + dateutil.relativedelta.relativedelta(months=1)
months.append(min_date)
def produceFastest1k(activities):
pandas.options.mode.chained_assignment = None
activities['activity_date'] = [datetime.datetime.strptime((datetime.datetime.strptime(x,"%Y-%m-%dT%H:%M:%SZ")).strftime('%Y%m'),'%Y%m') for x in activities['activity_date']]
activities['distance'] = activities['distance'].astype(float)
activities['elapsed_time'] = activities['elapsed_time'].astype(float)
activities.set_index(['activity_date'], inplace=True)
fastestSplits = activities['elapsed_time'].groupby('activity_date').agg(elapsed_time=('min')).reset_index()
base = datetime.datetime(1970, 1, 1, 0, 0, 0)
times = [base + datetime.timedelta(seconds=x) for x in fastestSplits['elapsed_time']]
dates = fastestSplits['activity_date']
x = matplotlib.dates.date2num(dates)
y = matplotlib.dates.date2num(times)
matplotlib.pylab.plot_date(x, y, linestyle='', marker='o', markersize=5, alpha=0.1, color="blue")
matplotlib.pyplot.title('Fastest 1k Pace over Time', fontsize=18, fontweight="bold")
matplotlib.pyplot.xticks(fontsize=16, rotation='vertical')
matplotlib.pyplot.yticks(fontsize=16)
matplotlib.pyplot.xlabel('Month', fontsize=18)
matplotlib.pyplot.ylabel('Pace (km / hh:mm:ss)', fontsize=18)
seaborn.regplot(x = x, y = y, scatter=None, data = fastestSplits ,order = 2)
loc= matplotlib.dates.AutoDateLocator()
matplotlib.pyplot.gca().xaxis.set_major_locator(loc)
matplotlib.pyplot.gca().yaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M:%S'))
matplotlib.pyplot.gca().xaxis.set_major_formatter(matplotlib.dates.AutoDateFormatter(loc))
matplotlib.pyplot.gcf().autofmt_xdate()
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig('Fastest_1k_Pace_over_Time.png')
matplotlib.pyplot.clf()
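# Illustrative sketch (not part of the original module) of the conversion used
# throughout this file: pace values in seconds are mapped onto matplotlib date
# numbers so an axis can be formatted as hh:mm:ss.
def _example_seconds_to_plotdate(seconds_list):
    base = datetime.datetime(1970, 1, 1, 0, 0, 0)
    times = [base + datetime.timedelta(seconds=s) for s in seconds_list]
    return matplotlib.dates.date2num(times)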
# py -c 'import visualiseData; visualiseData.produceTimeElevation()'
def produceTimeElevation():
splits = databaseAccess.getSplits()
base = datetime.datetime(1970, 1, 1, 0, 0, 0)
times = [base + datetime.timedelta(seconds=x) for x in splits['elapsed_time']]
y = matplotlib.dates.date2num(times)
matplotlib.pyplot.plot( splits['elevation_difference'], y, linestyle='', marker='o', markersize=5, alpha=0.1, color="blue")
seaborn.regplot(x = splits['elevation_difference'], y = y, scatter=None, order = 2)
matplotlib.pyplot.title('Running Pace vs. Elevation Change', fontsize=18, fontweight="bold")
matplotlib.pyplot.xticks(fontsize=16)
matplotlib.pyplot.yticks(fontsize=16)
matplotlib.pyplot.xlabel('Elevation Change (m)', fontsize=18)
matplotlib.pyplot.ylabel('1km Pace (hh:mm:ss)', fontsize=18)
matplotlib.pyplot.gca().yaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M:%S'))
matplotlib.pyplot.gcf().autofmt_xdate()
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig('Running_Pace_vs_Elevation_Change.png')
matplotlib.pyplot.clf()
# py -c 'import visualiseData; visualiseData.produceTimeDistance()'
def produceTimeDistance():
print('Starting the produceTimeDistance')
splits = databaseAccess.getSplits()
base = datetime.datetime(1970, 1, 1, 0, 0, 0)
times = [base + datetime.timedelta(seconds=x) for x in splits['elapsed_time']]
y = matplotlib.dates.date2num(times)
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
matplotlib.pyplot.plot( splits['total_distance'], y, linestyle='', marker='o', markersize=5, alpha=0.1, color="blue")
seaborn.regplot(x = splits['total_distance'], y = y, scatter=None, order = 2)
matplotlib.pyplot.title('Running Pace vs. Total Distance', fontsize=18, fontweight="bold")
matplotlib.pyplot.xticks(fontsize=16)
matplotlib.pyplot.yticks(fontsize=16)
matplotlib.pyplot.xlabel('Total Distance (m)', fontsize=18)
matplotlib.pyplot.ylabel('1km Pace (hh:mm:ss)', fontsize=18)
matplotlib.pyplot.gca().yaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M:%S'))
matplotlib.pyplot.gcf().autofmt_xdate()
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.gcf().set_size_inches(8,6)
matplotlib.pyplot.savefig('Running_Pace_vs_Total_Distance.png',dpi=300)
matplotlib.pyplot.clf()
# py -c 'import visualiseData; visualiseData.produceActivtyHistogram()'
def produceActivtyHistogram():
activities = databaseAccess.getActivityDistances()
# Apply the default theme
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
seaborn.catplot(x="nearest_5miles", y="cnt", data=activities, kind = "bar")
matplotlib.pyplot.title('Number of Activities per Distance', fontsize=20 ) #, fontweight="bold")
matplotlib.pyplot.xticks(fontsize=8,rotation=90)
matplotlib.pyplot.yticks(fontsize=8)
matplotlib.pyplot.xlabel('Distance (miles)', fontsize=14)
matplotlib.pyplot.ylabel('Count of Activities', fontsize=14)
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(8, 4.5)
matplotlib.pyplot.savefig('Number_of_Activities_per_Distance.png', dpi=300)
matplotlib.pyplot.clf()
def produceActivtyRideHistogram():
Ride = databaseAccess.getActivityRideDistances()
# Apply the default theme
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
seaborn.catplot(x="nearest_5miles", y="cnt", data=Ride, kind = "bar")
matplotlib.pyplot.title('Number of Outside Rides per Distance', fontsize=20 ) #, fontweight="bold")
matplotlib.pyplot.xlabel('Distance (miles)', fontsize=14)
matplotlib.pyplot.ylabel('Count of Rides', fontsize=14)
matplotlib.pyplot.xticks(fontsize=8,rotation=90)
matplotlib.pyplot.yticks(fontsize=8)
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(8, 4.5)
matplotlib.pyplot.savefig('Number_of_Rides_per_Distance.png', dpi=300)
matplotlib.pyplot.clf()
# py -c 'import visualiseData; visualiseData.produceTimePace()'
def produceTimePace():
splits = databaseAccess.getMonthSplits()
dates = [dateutil.parser.parse(x) for x in splits['activity_month']]
x = matplotlib.dates.date2num(dates)
base = datetime.datetime(1970, 1, 1, 0, 0, 0)
times = [base + datetime.timedelta(seconds=x) for x in splits['elapsed_time']]
y = matplotlib.dates.date2num(times)
matplotlib.pylab.plot_date(x, y, linestyle='', marker='o', markersize=5, alpha=0.1, color="blue")
matplotlib.pyplot.title('Running Pace over Time', fontsize=18, fontweight="bold")
matplotlib.pyplot.xticks(fontsize=16, rotation='vertical')
matplotlib.pyplot.yticks(fontsize=16)
matplotlib.pyplot.xlabel('Date', fontsize=18)
matplotlib.pyplot.ylabel('Pace (km / hh:mm:ss)', fontsize=18)
seaborn.regplot(x = x, y = y, scatter=None, data = splits ,order = 2)
loc= matplotlib.dates.AutoDateLocator()
matplotlib.pyplot.gca().xaxis.set_major_locator(loc)
matplotlib.pyplot.gca().yaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M:%S'))
matplotlib.pyplot.gca().xaxis.set_major_formatter(matplotlib.dates.AutoDateFormatter(loc))
matplotlib.pyplot.gcf().autofmt_xdate()
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.gcf().set_size_inches(18.5, 10.5)
matplotlib.pyplot.savefig('Running_Pace_over_Time.png')
matplotlib.pyplot.clf()
# py -c 'import visualiseData; visualiseData.produceElapsedTimeDistance()'
def produceElapsedTimeDistance():
splits = databaseAccess.getSplits()
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(8, 6)
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
splits = pandas.merge(splits, splits.groupby(['activity_id'])[['elapsed_time']].agg('sum'), on=["activity_id", "activity_id"])
splits['total_distance'] = splits['total_distance']* 0.000621371
base = datetime.datetime(1970, 1, 1, 0, 0, 0)
times = [base + datetime.timedelta(seconds=x) for x in splits['elapsed_time_y']]
y = matplotlib.dates.date2num(times)
max_distance = int(round(splits['total_distance'].max()))
max_time = max(times)
if max_distance < 100:
# Assume we want to extend for a century
max_distance = 100
# Since we haven't been that far, assume we can finish in under 8 hours
max_time = datetime.datetime(1970, 1, 1, 10, 0, 0)
_, axes = matplotlib.pyplot.subplots()
xlim = [0,max_distance]
axes.set_xlim(xlim)
ylim = [0,max_time]
axes.set_ylim(ylim)
matplotlib.pyplot.plot( splits['total_distance'], y, linestyle='', marker='o', markersize=3, alpha=0.1, color="orange")
seaborn.regplot(x = splits['total_distance'], y = y, scatter=None, data = splits ,order = 2, ax = axes, truncate = False)
matplotlib.pyplot.title('Time Taken Over Distances', fontsize=20)
matplotlib.pyplot.xticks(fontsize=16)
matplotlib.pyplot.yticks(fontsize=16)
matplotlib.pyplot.xlabel('Total Distance (miles)', fontsize=14)
matplotlib.pyplot.ylabel('Time Taken (hh:mm)', fontsize=14)
matplotlib.pyplot.grid()
matplotlib.pyplot.gca().yaxis.set_major_locator(matplotlib.dates.HourLocator(interval = 1))
matplotlib.pyplot.gca().yaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig('Time_Taken_Distance.png', dpi=300)
matplotlib.pyplot.clf()
def produceAverageSpeedOutside():
AllActivities = databaseAccess.getActivities()
NightWalk = AllActivities[AllActivities['type'] == 'Night Walk']
Walk = AllActivities[AllActivities['type'] == 'Walk']
Run = AllActivities[AllActivities['type'] == 'Run']
EBikeRide = AllActivities[AllActivities['type'] == 'EBikeRide']
VirtualRide = AllActivities[AllActivities['type'] == 'VirtualRide']
Ride = AllActivities[AllActivities['type'] == 'Ride']
frames = [EBikeRide, VirtualRide, Ride]
AllRides = pandas.concat(frames)
frames = [EBikeRide, Ride]
AllOutsideRides = pandas.concat(frames)
AllRidesSince19 = AllOutsideRides[AllOutsideRides['Year'].isin([2019,2020,2021,2022,2023,2024,2025])]
# Apply the default theme
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(12,9 )
matplotlib.pyplot.tight_layout()
averageoutside = seaborn.relplot(x='distance_miles', y = 'average_speed_miles_hr', data = AllRidesSince19, hue = 'type', col = 'Year', s=30)
averageoutside.set_titles("{col_name}") # use this argument literally
averageoutside.set_xlabels('Total Distance (miles)', fontsize=18)
averageoutside.set_ylabels('Average Speed (mph)', fontsize=18)
# Saving the Seaborn Figure:
plt.savefig('AverageSpeedOutSide.png')# , dpi=300)
def produceAverageSpeed():
AllActivities = databaseAccess.getActivities()
NightWalk = AllActivities[AllActivities['type'] == 'Night Walk']
Walk = AllActivities[AllActivities['type'] == 'Walk']
Run = AllActivities[AllActivities['type'] == 'Run']
EBikeRide = AllActivities[AllActivities['type'] == 'EBikeRide']
VirtualRide = AllActivities[AllActivities['type'] == 'VirtualRide']
Ride = AllActivities[AllActivities['type'] == 'Ride']
frames = [EBikeRide, VirtualRide, Ride]
AllRides = pandas.concat(frames)
AllRidesSince19 = AllRides[AllRides['Year'].isin([2019,2020,2021,2022,2023,2024,2025])]
# Apply the default theme
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(18.5, 10.5)
howfast = seaborn.relplot(x='distance_miles', y = 'average_speed_miles_hr', data = AllRidesSince19, hue = 'type', col = 'Year')
# Adjust title and axis labels directly
howfast.set_titles("{col_name}") # use this argument literally
howfast.set_xlabels('Total Distance (miles)', fontsize=18)
howfast.set_ylabels('Average Speed (mph)', fontsize=18)
# Saving the Seaborn Figure:
plt.savefig('AverageSpeed.png',dpi=300)
def produceAveragePower():
AllActivities = databaseAccess.getActivities()
NightWalk = AllActivities[AllActivities['type'] == 'Night Walk']
Walk = AllActivities[AllActivities['type'] == 'Walk']
Run = AllActivities[AllActivities['type'] == 'Run']
EBikeRide = AllActivities[AllActivities['type'] == 'EBikeRide']
VirtualRide = AllActivities[AllActivities['type'] == 'VirtualRide']
Ride = AllActivities[AllActivities['type'] == 'Ride']
frames = [EBikeRide, VirtualRide, Ride]
AllRides = pandas.concat(frames)
frames = [EBikeRide, Ride]
AllOutsideRides = pandas.concat(frames)
AllRidesSince19 = AllRides[AllRides['Year'].isin([2019,2020,2021,2022,2023,2024,2025])]
# Apply the default theme
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(12,9)
power = seaborn.relplot(x='distance_miles', y = 'average_watts', data = AllRidesSince19, hue = 'type', col = 'Year',s=150)
# Adjust title and axis labels directly
power.set_titles("{col_name}") # use this argument literally
power.set_xlabels('Total Distance (miles)', fontsize=18)
power.set_ylabels('Average Power (watts)', fontsize=18)
# Saving the Seaborn Figure:
plt.savefig('AveragePower.png', dpi=300)
def produceAverageCadence():
AllActivities = databaseAccess.getActivities()
NightWalk = AllActivities[AllActivities['type'] == 'Night Walk']
Walk = AllActivities[AllActivities['type'] == 'Walk']
Run = AllActivities[AllActivities['type'] == 'Run']
EBikeRide = AllActivities[AllActivities['type'] == 'EBikeRide']
VirtualRide = AllActivities[AllActivities['type'] == 'VirtualRide']
Ride = AllActivities[AllActivities['type'] == 'Ride']
frames = [EBikeRide, VirtualRide, Ride]
AllRides = pandas.concat(frames)
AllRidesSince20 = AllRides[AllRides['Year'].isin([2020,2021,2022,2023,2024,2025])]
frames = [EBikeRide, Ride]
AllOutsideRides = pandas.concat(frames)
# Apply the default theme
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(12,9 )
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
matplotlib.pyplot.tight_layout()
cadence = seaborn.relplot(x='distance_miles', y = 'average_cadence', data = AllRidesSince20, hue = 'type', col = 'Year',s=100)
# Adjust title and axis labels directly
cadence.set_titles("{col_name}") # use this argument literally
cadence.set_xlabels('Total Distance (miles)', fontsize=18)
cadence.set_ylabels('Average Cadence', fontsize=18)
# Saving the Seaborn Figure:
plt.savefig('AverageCadence.png',dpi=300)
def produceDistanceByDay():
AllActivities = databaseAccess.getActivities()
NightWalk = AllActivities[AllActivities['type'] == 'Night Walk']
Walk = AllActivities[AllActivities['type'] == 'Walk']
Run = AllActivities[AllActivities['type'] == 'Run']
EBikeRide = AllActivities[AllActivities['type'] == 'EBikeRide']
VirtualRide = AllActivities[AllActivities['type'] == 'VirtualRide']
Ride = AllActivities[AllActivities['type'] == 'Ride']
frames = [EBikeRide, VirtualRide, Ride]
AllRides = pandas.concat(frames)
AllRidesSince20 = AllRides[AllRides['Year'].isin([2020,2021,2022,2023,2024,2025])]
frames = [EBikeRide, Ride]
AllOutsideRides = pandas.concat(frames)
day_of_week_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ]
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(12,9 )
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
matplotlib.pyplot.tight_layout()
howfar = seaborn.catplot(x='start_date_day_of_week', y='distance_miles', kind='strip', data=AllRides,
order=day_of_week_order, col='type', height=9, aspect=1,
palette='pastel',s=10)
howfar.set_titles("{col_name}") # use this argument literally
howfar.set_xlabels('Week Day', fontsize=18)
howfar.set_ylabels('Distance (miles)', fontsize=18)
howfar.set_xticklabels(rotation=90)
# Saving the Seaborn Figure:
plt.savefig('DistanceByDay.png',dpi=300)
def produceDistanceByDayRide():
AllActivities = databaseAccess.getActivities()
Ride = AllActivities[AllActivities['type'] == 'Ride']
day_of_week_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ]
figure = matplotlib.pyplot.gcf()
figure.set_size_inches(12,9 )
seaborn.set_theme()
seaborn.set(style="darkgrid", context="poster")
matplotlib.pyplot.tight_layout()
howfar = seaborn.catplot(x='start_date_day_of_week', y='distance_miles', kind='strip', data=Ride,
order=day_of_week_order, col='type', height=9, aspect=1,
palette='pastel',s=10)
howfar.set_titles("{col_name}") # use this argument literally
howfar.set_xlabels('Week Day', fontsize=18)
howfar.set_ylabels('Distance (miles)', fontsize=18)
(howfar.set_axis_labels().set_xticklabels(rotation=30))
# Saving the Seaborn Figure:
plt.savefig('DistanceByDayRide.png', dpi=300)
def produceCadenceByDay():
print('Starting the produceCadenceByDay')
AllActivities = databaseAccess.getActivities()
NightWalk = AllActivities[AllActivities['type'] == 'Night Walk']
Walk = AllActivities[AllActivities['type'] == 'Walk']
Run = AllActivities[AllActivities['type'] == 'Run']
EBikeRide = AllActivities[AllActivities['type'] == 'EBikeRide']
VirtualRide = AllActivities[AllActivities['type'] == 'VirtualRide']
Ride = AllActivities[AllActivities['type'] == 'Ride']
frames = [EBikeRide, VirtualRide, Ride]
AllRides = pandas.concat(frames)
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: abnormal_detection_gaussian.py
@time: 2019-04-18 18:03
"""
import pandas as pd
from mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew
from feature_selector import FeatureSelector
if __name__ == '__main__':
mode = 4
if mode == 4:
"""
feature selector
"""
# df1 = pd.read_excel('data.xlsx').iloc[:, 1:]
# print(df1.info())
df = pd.read_excel('/Users/luoyonggui/Documents/work/dataset/0/data.xlsx')
# print(df.info())  # inspect the dataframe's columns and missing-value counts
label = df['理赔结论']
df = df.drop(columns=['理赔结论'])
fs = FeatureSelector(data=df, labels=label)
# Handle missing values
fs.identify_missing(missing_threshold=0.6)
if mode == 3:
"""
Merge the insured person's basic information
"""
df1 = pd.read_excel('data.xlsx', 'Sheet2').dropna(axis=1, how='all')
# print(df1.info())
"""
归并客户号 528 non-null int64
性别 528 non-null object
出生年月日 528 non-null datetime64[ns]
婚姻状况 432 non-null object
职业 484 non-null float64
职业危险等级 484 non-null float64
年收入 528 non-null int64
年交保费 528 non-null float64
犹豫期撤单次数 528 non-null int64
既往理赔次数 528 non-null int64
既往拒保次数 528 non-null int64
既往延期承保次数 528 non-null int64
非标准体承保次数 528 non-null int64
既往调查标识 528 non-null object
既往体检标识 528 non-null object
累积寿险净风险保额 528 non-null float64
累积重疾净风险保额 528 non-null float64
投保人年收入与年交保费比值 437 non-null float64
被保险人有效重疾防癌险保单件数 528 non-null int64
被保险人有效短期意外险保单件数 528 non-null int64
被保险人有效短期健康险保单件数 528 non-null int64
被保险人90天内生效保单件数 528 non-null int64
被保险人180天内生效保单件数 528 non-null int64
被保险人365天内生效保单件数 528 non-null int64
被保险人730天内生效保单件数 528 non-null int64
客户黑名单标识 528 non-null object
保单失效日期 11 non-null datetime64[ns]
保单复效日期 7 non-null datetime64[ns]
受益人变更日期 12 non-null datetime64[ns]
"""
cols = list(df1.columns)
cols.remove('保单失效日期')
cols.remove('保单复效日期')
cols.remove('受益人变更日期')
cols.remove('客户黑名单标识')  # only one distinct value
df1['出生年'] = df1['出生年月日'].apply(lambda x: int(str(x)[:4]))
cols.append('出生年')
cols.remove('出生年月日')
t = pd.get_dummies(df1['婚姻状况'], prefix='婚姻状况')
df2 = pd.concat([df1, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('婚姻状况')
t = pd.get_dummies(df2['性别'], prefix='性别')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('性别')
t = pd.get_dummies(df2['既往调查标识'], prefix='既往调查标识')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('既往调查标识')
t = pd.get_dummies(df2['既往体检标识'], prefix='既往体检标识')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('既往体检标识')
# print(df2['职业'].value_counts())
"""
Take the leading digits of the occupation code as a category (the code below keeps only the first character)
"""
df2['职业'] = df2['职业'].apply(lambda x: str(x)[:1])
# print(df2['职业'].value_counts())
t = pd.get_dummies(df2['职业'], prefix='职业')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('职业')
print(df2['职业危险等级'].value_counts())
t = pd.get_dummies(df2['职业危险等级'], prefix='职业危险等级')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('职业危险等级')
df2['投保人年收入与年交保费比值'] = df2['投保人年收入与年交保费比值'].fillna(0)
"""
The merged customer IDs contain duplicates; keep only the first record of each duplicate
"""
df2 = df2.drop_duplicates(subset=['归并客户号'], keep='first')
print(df2['归并客户号'].value_counts())
df2 = df2.rename(columns={'归并客户号': '被保人归并客户号'})
cols.remove('归并客户号')
cols.append('被保人归并客户号')
# print(df2[cols].info())
# merge with the training data
train_df = picklew.loadFromFile('train_data1.pkl')
print(train_df.shape)
train_df = pd.merge(train_df, df2[cols], how='left', on='被保人归并客户号')
del train_df['营销员工号']
del train_df['被保人核心客户号']
del train_df['保人归并客户号']
del train_df['被保人归并客户号']
print(train_df.shape) # (562, 30)
print(train_df.info())
del train_df['理赔金额']
picklew.dump2File(train_df, 'train_data2.pkl')
if mode == 2:
"""
Merge sales-agent information
"""
df1 = pd.read_excel('data.xlsx', 'Sheet1')
# print(df1.info())
"""
RangeIndex: 532 entries, 0 to 531
Data columns (total 7 columns):
营销员工号 532 non-null int64
营销员黑名单标记 326 non-null object
营销员入司时间 326 non-null datetime64[ns]
营销员离职时间 97 non-null datetime64[ns]
营销员所售保单数量 532 non-null int64
营销员所售保单标准体数量 532 non-null int64
营销员所售保单出险数量 532 non-null int64
"""
cols = list(df1.columns)
# print(df1['营销员黑名单标记'].value_counts())
"""
All values are 'N', which carries no information, so drop it
"""
cols.remove('营销员离职时间')
df2 = df1[cols].dropna()
cols.remove('营销员黑名单标记')
cols.remove('营销员入司时间')
df2 = df2[cols]
# print(df2.info())
"""
营销员工号 326 non-null int64
营销员所售保单数量 326 non-null int64
营销员所售保单标准体数量 326 non-null int64
营销员所售保单出险数量 326 non-null int64
"""
# print(df2['营销员工号'].value_counts())
# print(df2.info())
# merge dataframes
train_df = picklew.loadFromFile('train_data.pkl')
train_df = train_df.rename(columns={'(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号':'营销员工号'})
print(train_df.shape)
# train_df = pd.merge(train_df, df2, how='left', on='营销员工号')
print(train_df.shape)#(562, 30)
print(train_df.info())
picklew.dump2File(train_df, 'train_data1.pkl')
if mode == 1:
"""
Main table
"""
df1 = pd.read_excel('data.xlsx').iloc[:, 1:]
# print(df1.shape)#(562, 41)
# print(df1.columns)
"""
['平台流水号', '保单管理机构', '保单号', '指定受益人标识', '受益人与被保险人关系', '交费方式',
'交费期限', '核保标识', '核保结论', '投保时年龄', '基本保额与体检保额起点比例', '生调保额起点',
'投保保额临近核保体检临界点标识', '投保保额', '临近核保生调临界点标识', '理赔金额', '累计已交保费', '理赔结论',
'Unnamed: 19', '生效日期', '出险前最后一次复效日期', '承保后最小借款日期', '出险日期', '报案时间',
'申请日期', '出险减生效天数', '出险减最后一次复效天数', '重疾保单借款减生效日期天数', '申请时间减出险时间',
'报案时间减出险时间', '出险原因1', '出险原因2', '出险原因3', '出险结果', '保单借款展期未还次数', '失复效记录次数',
'销售渠道', '(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号', '被保人核心客户号', '保人归并客户号',
'被保人归并客户号']
"""
# drop columns that are entirely null
df2 = df1.dropna(axis=1, how='all')
# print(df2.shape)#(562, 33)
# print(df2.columns)
"""
['平台流水号', '保单管理机构', '保单号', '指定受益人标识', '受益人与被保险人关系', '交费方式', '交费期限',
'核保标识', '核保结论', '投保时年龄', '投保保额', '理赔金额', '累计已交保费', '理赔结论',
'Unnamed: 19', '生效日期', '出险前最后一次复效日期', '承保后最小借款日期', '出险日期', '报案时间',
'申请日期', '出险减生效天数', '出险减最后一次复效天数', '申请时间减出险时间', '报案时间减出险时间', '出险原因1',
'出险结果', '失复效记录次数', '销售渠道', '(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号',
'被保人核心客户号', '保人归并客户号', '被保人归并客户号']
"""
# print(df2.info())
"""
平台流水号 562 non-null int64
保单管理机构 562 non-null int64
保单号 562 non-null int64
指定受益人标识 562 non-null object
受益人与被保险人关系 538 non-null object
交费方式 562 non-null object
交费期限 562 non-null int64
核保标识 562 non-null object
核保结论 544 non-null object
投保时年龄 562 non-null int64
投保保额 562 non-null float64
理赔金额 562 non-null float64
累计已交保费 562 non-null float64
理赔结论 562 non-null object
Unnamed: 19 562 non-null int64
生效日期 562 non-null datetime64[ns]
出险前最后一次复效日期 6 non-null datetime64[ns]
承保后最小借款日期 2 non-null datetime64[ns]
出险日期 562 non-null datetime64[ns]
报案时间 119 non-null datetime64[ns]
申请日期 562 non-null datetime64[ns]
出险减生效天数 562 non-null int64
出险减最后一次复效天数 6 non-null float64
申请时间减出险时间 562 non-null int64
报案时间减出险时间 119 non-null float64
出险原因1 562 non-null object
出险结果 552 non-null object
失复效记录次数 562 non-null int64
销售渠道 562 non-null object
(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号 562 non-null int64
被保人核心客户号 562 non-null int64
保人归并客户号 562 non-null int64
被保人归并客户号 562 non-null int64
"""
train_col = list(df2.columns)
train_col.remove('平台流水号')
train_col.remove('Unnamed: 19')
train_col.remove('生效日期')
train_col.remove('出险日期')
train_col.remove('报案时间')
train_col.remove('申请日期')
train_col.remove('出险减最后一次复效天数')
train_col.remove('报案时间减出险时间')
train_col.remove('出险前最后一次复效日期')
train_col.remove('承保后最小借款日期')
# print(df2[train_col].info())
"""
保单管理机构 562 non-null int64
保单号 562 non-null int64
指定受益人标识 562 non-null object
受益人与被保险人关系 538 non-null object
交费方式 562 non-null object
交费期限 562 non-null int64
核保标识 562 non-null object
核保结论 544 non-null object
投保时年龄 562 non-null int64
投保保额 562 non-null float64
理赔金额 562 non-null float64
累计已交保费 562 non-null float64
出险减生效天数 562 non-null int64
申请时间减出险时间 562 non-null int64
出险原因1 562 non-null object
出险结果 552 non-null object
失复效记录次数 562 non-null int64
销售渠道 562 non-null object
(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号 562 non-null int64
被保人核心客户号 562 non-null int64
保人归并客户号 562 non-null int64
被保人归并客户号 562 non-null int64
"""
label = df2['理赔结论']
train_col.remove('理赔结论')  # drop the label column
# print(label.value_counts())
"""
正常给付 432
全部拒付 107
协议给付 15
部分给付 8
"""
# print(df1['保单号'].value_counts())
train_col.remove('保单号')
# print(df1['保单管理机构'].value_counts())
"""
Keep only the first 4 digits
"""
df2['保单管理机构'] = df2['保单管理机构'].copy().apply(lambda x: str(x)[:4])
# print(df2['保单管理机构'].value_counts())
"""
8603 280
8602 163
8605 65
8604 34
8606 16
8608 4
"""
t = pd.get_dummies(df2['指定受益人标识'], prefix='指定受益人标识')
print(df2.shape)
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
train_col.extend(list(t.columns))
train_col.remove('指定受益人标识')
t = pd.get_dummies(df2['交费方式'], prefix='交费方式')
print(df2.shape)
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
train_col.extend(list(t.columns))
train_col.remove('交费方式')
t = pd.get_dummies(df2['核保标识'], prefix='核保标识')
import math
import os
from os.path import join as pjoin
import json
import copy
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import GPUtil
import pandas as pd
from multiprocessing import Pool
from tqdm import tqdm
import sklearn.metrics
from .config import print_config, class_labels
from .utils import (
anno_to_binary, cut_score, debug, display_imgs, info, gen_cwd_slash, labels_to_str, load_config, load_img,
np_macro_f1, str_to_labels, class_id_to_label, class_ids_to_label, combine_windows, chunk, compute_i_coords,
format_macro_f1_details, vec_to_str
)
# from .utils_heavy import predict, model_from_config
from .ignite_trainer import predict as predict
# def predict_and_save_scores(
# config,
# path_to_anno=None,
# path_to_imgs=None,
# save_scores_to=None,
# to_csv=None,
# ):
# model = model_from_config(config, which='latest')
# valid_anno = pd.read_csv(path_to_anno, index_col=0)
# predict(config)
# return valid_anno_predicted
def remove_scores_predicted(config):
cwd_slash = gen_cwd_slash(config)
pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0) \
.drop('Scores Predicted', 1) \
.to_csv(cwd_slash('validation_predictions.csv'))
def evaluate_validation_prediction(config):
info('evaluate_validation_prediction()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(config['path_to_valid_anno_cache'], index_col=0, dtype=object)
prediction_df = pd.read_csv(cwd_slash('valid_predicted.csv'), index_col=0, dtype=object)
anno = anno.join(prediction_df, how='left')
# DEBUG BEGIN
anno.loc[:, ['Target', 'Predicted', 'folder', 'extension']].to_csv(cwd_slash('valid_anno_predicted.csv'))
# DEBUG END
y_true, y_pred = anno_to_binary(anno, config)
macro_f1_score, f1_details = np_macro_f1(y_true, y_pred, config, return_details=True)
print(format_macro_f1_details(f1_details, config))
print(f'macro_f1_score = {macro_f1_score}')
def final_corrections(config):
info('final_corrections()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('test_predicted.csv'), index_col=0)
# correct best submission [TODO: REMOVE: not for private leaderboard] --------------
# best_anno = pd.read_csv(cwd_slash('submission_587.csv'), index_col=0)
# rare_classes = [15, 27, 10, 8, 9, 17, 20, 24, 26]
# comparison_anno = anno.copy()
# comparison_anno['best'] = best_anno['Predicted']
# plot_imgs(
# config,
# comparison_anno.query('best != Predicted').sample(28),
# save_as='./tmp/best_submission_corrections.png',
# folder='data/test_minimaps',
# extension='jpg',
# )
# new_rows = []
# for id_, row in comparison_anno.iterrows():
# current_labels = str_to_labels(row['Predicted'])
# best_labels = str_to_labels(row['best'])
# for c in rare_classes:
# if c in current_labels and c not in best_labels:
# debug(f"removing {c} from {id_}")
# current_labels.remove(c)
# if c not in current_labels and c in best_labels:
# debug(f"adding {c} to {id_}")
# current_labels.append(c)
# new_row = {
# 'Id': id_,
# 'Predicted': labels_to_str(current_labels),
# }
# new_rows.append(new_row)
# anno = pd.DataFrame.from_records(new_rows).set_index('Id')
# debug(f"anno ({len(anno)}) =\n{anno.head(10)}")
# correct leaked --------------
# pairs_anno = pd.read_csv('data/identical_pairs.csv')
# hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
# correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
# .join(anno, how='left', on=['test_id'])
# correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
# debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
# debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
# correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
# actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# # DEBUG BEGIN
# # plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# # DEBUG END
# debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
# debug(f"actual_corrections =\n{actual_corrections}")
# anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# correct leaked 2 --------------
pairs_anno = pd.read_csv('data/identical_pairs_new_fixed.csv')
for i_begin, i_end in chunk(len(pairs_anno), 24):
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('test_id', axis=1).set_index('hpa_id'),
save_as=f'./tmp/diff_{i_begin}_hpa.jpg',
folder='data/hpa_public_imgs',
extension='jpg',
background_color=None,
channel=None,
dpi=100,
)
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('hpa_id', axis=1).set_index('test_id'),
save_as=f'./tmp/diff_{i_begin}_test.jpg',
folder='data/test_full_size',
extension='tif',
background_color=None,
channel=['red', 'green', 'blue'],
dpi=100,
)
hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
.join(anno, how='left', on=['test_id'])
correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# DEBUG BEGIN
# plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# DEBUG END
debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
debug(f"actual_corrections =\n{actual_corrections}")
anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# DEBUG BEGIN
# plot_imgs(
# config,
# anno.loc[[27 in str_to_labels(p) for p in anno['Predicted']]],
# folder='data/test_minimaps',
# extension='jpg'
# )
# DEBUG END
anno.to_csv(cwd_slash('test_predicted_corrected.csv'))
# def list_confusion(config):
# fn_counts_list = {}
# class_labels = [f'{k}-{classes[k]}' for k in range(n_classes)]
# for which_class in tqdm(range(n_classes)):
# cwd_slash = gen_cwd_slash(config)
# anno = pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0)
# y_true, y_pred = anno_to_binary(anno)
# fn = y_true * (1 - y_pred)
# fp = (1 - y_true) * y_pred
# i_fn_predictions = np.nonzero(fn[:, which_class])[0]
# fn_counts = fp[i_fn_predictions, :].sum(axis=0) / len(i_fn_predictions)
# fn_counts_list[class_labels[which_class]] = fn_counts
# # out = pd.Series(fn_counts, index=pd.Index(range(n_classes), name='class'))\
# # .sort_values(ascending=False)\
# # .head(3)
# pd.DataFrame(fn_counts_list, index=class_labels).to_csv('./tmp/confusion.csv')
def plot_imgs(
config,
anno,
save_as='./tmp/imgs.jpg',
folder=None,
extension=None,
background_color=None,
channel=None,
dpi=100,
):
img_list = []
for id_, row in anno.iterrows():
img = load_img(
id_,
config,
resize=False,
folder=row.get('folder') or folder,
channel=channel,
extension=row.get('extension') or extension,
)
# if type(channel) is str:
# channel = {
# 'red': 0,
# 'green': 1,
# 'blue': 2,
# 'yellow': 3,
# }.get(channel)
# if channel is not None:
# img = img[:, :, channel]
debug(f' - Loaded image {id_} with size {img.shape}')
img_label = '\n'.join([f'{id_}'] + [f'{k} = {v}' for k, v in row.items()])
img_list.append((img, img_label))
display_imgs(
img_list,
save_as=save_as,
background_color=background_color,
dpi=dpi,
)
def plot_tfpn_examples(config, which_class, max_n_imgs=28, output_folder='./tmp'):
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0)
y_true, y_pred = anno_to_binary(anno)
y_true = y_true[:, which_class]
y_pred = y_pred[:, which_class]
def plot_imgs(selector, filename, background_color):
debug(f'selector = {selector}')
if type(config['score_threshold']) is list:
score_threshold = config['score_threshold'][which_class]
else:
score_threshold = config['score_threshold']
tp_idxs = np.nonzero(selector > score_threshold)[0]
if len(tp_idxs) > max_n_imgs:
sample_idxs = np.sort(np.random.choice(range(len(tp_idxs)), max_n_imgs, replace=False))
tp_idxs = tp_idxs[sample_idxs]
img_list = []
for idx in tp_idxs:
row = anno.iloc[idx]
img_id = row.name
labels_true = class_ids_to_label(str_to_labels(row['Target']), config)
labels_pred = class_ids_to_label(str_to_labels(row['Predicted']), config)
img_label = '\n'.join([
f'{img_id}',
f'T: {labels_true}',
f'P: {labels_pred}',
])
# img = load_img(img_id, self.config, resize=False, folder='./data/train_full_size', extension='tif')
img = load_img(
img_id,
config,
resize=False,
folder=config['path_to_valid'],
channel=None,
extension=config['img_extension'],
)
debug(f' - Loaded image {img_id} with size {img.shape}')
img_list.append((img, img_label))
display_imgs(
img_list,
save_as=filename,
background_color=background_color,
)
def out_slash(fn):
return pjoin(output_folder, fn)
plot_imgs(y_true * y_pred, out_slash(f'class_{which_class}_true_positives.png'), 'white')
plot_imgs((1 - y_true) * y_pred, out_slash(f'class_{which_class}_false_positives.png'), 'yellow')
plot_imgs(y_true * (1 - y_pred), out_slash(f'class_{which_class}_false_negatives.png'), 'blue')
# plot_imgs((1 - y_true) * (1 - y_pred), out_slash(f'class_{which_class}_true_negatives.png'), 'black')
def add_extra_data_into_train_anno(config):
cwd_slash = gen_cwd_slash(config)
train_anno = pd.read_csv(cwd_slash('train_windowed_anno.csv'), index_col=0)
valid_anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'), index_col=0)
train_with_hpa_anno = pd.read_csv('data/train_with_hpa.csv', index_col=0)
train_windowed_anno = pd.read_csv('data/train_windowed.csv', index_col=0)
hpa_ids = set(train_with_hpa_anno.index)
existing_ids = set(valid_anno['source_img_id']).union(train_anno['source_img_id'])
new_ids = hpa_ids.difference(existing_ids)
extra_train_anno = train_with_hpa_anno.loc[new_ids]
debug(f'extra_train_anno ({len(extra_train_anno)}) =\n{extra_train_anno.head(10)}')
extra_train_windowed_anno = train_windowed_anno.join(extra_train_anno, how='right', on=['source_img_id'])
debug(f'extra_train_windowed_anno ({len(extra_train_windowed_anno)}) =\n{extra_train_windowed_anno.head(10)}')
pd.concat([train_anno, extra_train_windowed_anno]).to_csv(cwd_slash('train_windowed_anno.csv'))
# def calibrate_one_task(task):
# i_class = task['i_class']
# mat_pred_windowed = task['mat_pred_windowed']
# mat_true = task['mat_true']
# alpha = task['alpha']
# i_windowss = task['i_windowss']
# beta_values = task['beta_values']
# config = task['config']
# details_list = []
# for beta in beta_values:
# vec_true = mat_true[:, i_class]
# vec_pred_windowed = mat_pred_windowed[:, i_class]
# list_pred = []
# for i_source, i_windows in enumerate(i_windowss):
# combined_prediction = vec_pred_windowed[i_windows].mean() + vec_pred_windowed[i_windows].mean()
# list_pred.append(combined_prediction)
# vec_pred = np.array(list_pred)
# f1 = np_macro_f1(vec_true, vec_pred, config)
# details_list.append({
# 'i_class': i_class,
# 'alpha': alpha,
# 'beta': beta,
# 'f1': f1,
# })
# # debug(f'i_class = {i_class}, alpha = {alpha}, beta = {beta}, f1 = {f1}, best_f1 = {best_f1}')
# details_df = pd.DataFrame.from_records(details_list)
# return {
# 'task': task,
# 'details_df': details_df,
# }
# def calibrate_windowed_score(
# config,
# n_threads=70,
# n_cols=7,
# save_graph_to='./tmp/calibrate_score_threshold.png',
# epsilon=1e-7,
# ):
# info('calibrate_windowed_score()')
# cwd_slash = gen_cwd_slash(config)
# alpha_values = range(10)
# beta_values = np.linspace(0, 1, 21)
# mat_pred_windowed = np.load(cwd_slash('valid_windowed_scores.npy'))
# valid_anno = pd.read_csv(config['path_to_valid_anno_cache'])
# mat_true = np.zeros((valid_anno.shape[0], 28))
# for i, target_str in enumerate(valid_anno['Target']):
# targets = str_to_labels(target_str)
# mat_true[np.ix_([i], targets)] = 1
# valid_windowed_anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'))
# valid_windowed_anno['row_number'] = valid_windowed_anno.index
# grouped = valid_windowed_anno.groupby('source_img_id')
# source_id_to_window_row_nums = {id_: group['row_number'].values.tolist() for id_, group in grouped}
# i_windowss = [source_id_to_window_row_nums[id_] for id_ in valid_anno['Id']]
# task_list = [
# {
# 'i_class': i_class,
# 'alpha': alpha,
# 'mat_pred_windowed': mat_pred_windowed,
# 'mat_true': mat_true,
# 'i_windowss': i_windowss,
# 'beta_values': beta_values,
# 'config': config,
# } for i_class in range(config['_n_classes']) for alpha in alpha_values
# ]
# details_dfs = []
# with Pool(n_threads) as p:
# result_iter = p.imap_unordered(calibrate_one_task, task_list)
# for i_result, result in enumerate(result_iter):
# info(
# f"({i_result}/{len(task_list)}) "
# f"i_class = {result['task']['i_class']}, "
# f"alpha = {result['task']['alpha']} is done"
# )
# details_dfs.append(result['details_df'])
# details_df = pd.concat(details_dfs)
# if save_graph_to is not None:
# n_rows = math.ceil(config['_n_classes'] / n_cols)
# plt.figure(figsize=(n_cols * 10, n_rows * 10))
# for i_class, group_df in details_df.groupby('i_class'):
# mat = group_df.pivot(index='beta', columns='alpha', values='f1')
# plt.subplot(n_rows, n_cols, i_class + 1, sharex=plt.gca(), sharey=plt.gca())
# plt.imshow(mat, aspect='auto')
# plt.xticks(range(len(alpha_values)), alpha_values)
# plt.yticks(range(len(beta_values)), beta_values)
# plt.text(0, 1, f'{i_class}', transform=plt.gca().transAxes)
# plt.savefig(save_graph_to, dpi=100)
# debug(f'Saved graph to {save_graph_to}')
# print(details_df)
# details_df.to_csv(cwd_slash('calibrate_windowed_score_details.csv'), index=False)
# debug(f"saved to {cwd_slash('calibrate_windowed_score_details.csv')}")
# best_df = pd.concat([group.sort_values('f1').tail(1) for i_class, group in details_df.groupby('i_class')])
# best_df['manually_modified'] = False
# best_df.to_csv(cwd_slash('calibrate_windowed_score.csv'), index=False)
# debug(f"saved to {cwd_slash('calibrate_windowed_score.csv')}")
# def calibrate_score_threshold(config, n_cols=7, save_graph_to='./tmp/calibrate_score_threshold.png', epsilon=1e-7):
# info('calibrate_score_threshold()')
# cwd_slash = gen_cwd_slash(config)
# n_rows = math.ceil(config['_n_classes'] / n_cols)
# mat_pred = np.load(cwd_slash('valid_scores.npy'))
# anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'))
# mat_true = np.zeros_like(mat_pred)
# for i, target_str in enumerate(anno['Target']):
# targets = str_to_labels(target_str)
# mat_true[np.ix_([i], targets)] = 1
# if save_graph_to is not None:
# plt.figure(figsize=(n_cols * 10, n_rows * 10))
# best_ths = []
# for class_id in tqdm(config['classes']):
# thresholds = np.round(np.linspace(0, 1, 1001), 3)
# f1_scores = np.zeros_like(thresholds)
# ps = []
# rs = []
# for i_th, th in enumerate(thresholds):
# y_pred = mat_pred[:, i_class]
# y_pred = np.where(y_pred < th, np.zeros_like(y_pred), np.ones_like(y_pred))
# y_true = mat_true[:, i_class]
# tp = np.sum(y_true * y_pred, axis=0)
# # tn = np.sum((1 - y_true) * (1 - y_pred), axis=0)
# fp = np.sum((1 - y_true) * y_pred, axis=0)
# fn = np.sum(y_true * (1 - y_pred), axis=0)
# p = tp / (tp + fp + epsilon)
# r = tp / (tp + fn + epsilon)
# ps.append(p)
# rs.append(r)
# out = 2 * p * r / (p + r + epsilon)
# # replace all NaN's with 0's
# out = np.where(np.isnan(out), np.zeros_like(out), out)
# f1_scores[i_th] = out
# if save_graph_to is not None:
# plt.subplot(n_rows, n_cols, i_class + 1, sharex=plt.gca(), sharey=plt.gca())
# plt.plot(thresholds, f1_scores)
# plt.plot(thresholds, ps)
# plt.plot(thresholds, rs)
# plt.text(0, 1, f'{i_class}', transform=plt.gca().transAxes)
# # debug(f'thresholds = {thresholds}')
# # debug(f'f1_scores = {f1_scores}')
# best_th = thresholds[np.argmax(f1_scores)]
# best_ths.append(best_th)
# if save_graph_to is not None:
# plt.savefig(save_graph_to, dpi=100)
# debug(f'Saved graph to {save_graph_to}')
# debug(f'best_ths = {best_ths}')
# with open(cwd_slash('calibrated_score_threshold.json'), 'w') as f:
# json.dump(best_ths, f)
def predict_for_valid(config):
cwd_slash = gen_cwd_slash(config)
valid_windowed_anno = pd.read_csv(config['path_to_valid_windowed_anno_cache'], index_col=0)
predict(
config,
valid_windowed_anno,
cwd_slash('model.pth'),
save_numpy_to='valid_windowed_predicted.npy',
save_csv_to='valid_windowed_anno_predicted.csv',
target_col='corrected_target',
)
# predict(
# anno=cwd_slash('valid_windowed_anno.csv'),
# config=config,
# extension=config['img_extension'],
# folder=config['path_to_valid'],
# to_npy=cwd_slash('valid_windowed_scores.npy'),
# )
# def cut_score_for_valid(config):
# info('cut_score_for_valid()')
# cwd_slash = gen_cwd_slash(config)
# path_to_score = cwd_slash('calibrate_windowed_score.csv')
# if os.path.exists(path_to_score):
# tb = pd.read_csv(path_to_score)
# debug(f"read from {path_to_score}")
# score_threshold = tb.sort_values('i_class')['beta'].values
# debug(f'score_threshold = {score_threshold}')
# min_n_windows = tb.sort_values('i_class')['alpha'].values
# debug(f'min_n_windows = {min_n_windows}')
# else:
# debug(f'WARNING: using default score_threshold and min_n_windows')
# score_threshold = config['score_threshold']
# min_n_windows = 3
# # if os.path.exists(cwd_slash('calibrated_score_threshold.json')):
# # with open(cwd_slash('calibrated_score_threshold.json'), 'r') as f:
# # score_threshold = json.load(f)
# # else:
# # score_threshold = config['score_threshold']
# debug('cut_score()')
# cut_score(
# anno=cwd_slash('valid_windowed_anno.csv'),
# scores_mat=cwd_slash('valid_windowed_scores.npy'),
# config=config,
# prediction_col='Predicted',
# score_threshold=score_threshold,
# to_csv=cwd_slash('valid_windowed_predicted.csv'),
# )
# debug('combine_windows()')
# combine_windows(
# cwd_slash('valid_windowed_predicted.csv'),
# min_n_windows,
# config,
# save_combined_anno_to=cwd_slash('valid_predicted.csv'),
# group_col='source_img_id',
# )
def predict_for_test(config):
info('predict_for_test()')
cwd_slash = gen_cwd_slash(config)
test_windowed_anno = pd.read_csv(config['path_to_test_anno'], index_col=0)
test_windowed_anno = compute_i_coords(test_windowed_anno, config)
test_windowed_anno['group'] = 'test_full_size'
predict(
config,
test_windowed_anno,
cwd_slash('model.pth'),
save_numpy_to='test_windowed_predicted.npy',
save_csv_to='test_windowed_anno_predicted.csv',
)
# anno = pd.read_csv('./data/test_windowed.csv', index_col=0)
# if config['submission_subsampling'] is not None:
# anno = anno.sample(config['submission_subsampling'])
# predict(
# anno=anno,
# config=config,
# extension=config['img_extension'],
# folder=config['path_to_test'],
# to_npy=cwd_slash('test_windowed_scores.npy'),
# )
def create_csv_for_debugger(config):
info('create_csv_for_debugger()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'), index_col=0)
pred_mat = np.load(cwd_slash('valid_windowed_scores.npy'))
pred_anno = pd.DataFrame(pred_mat, columns=[f'score_of_{x}' for x in config['class_ids']], index=anno.index)
anno.join(pred_anno, how='left').to_csv(cwd_slash('valid_windowed_scores.csv'))
def take_top_n_for_test(config):
info('take_top_n_for_test()')
cwd_slash = gen_cwd_slash(config)
class_distrib = pd.read_csv('tmp/class_distribution.csv', index_col=0)
# test_scores = pd.read_csv(cwd_slash('test_aggregated_prediction.csv'), index_col=0)
test_scores = pd.read_csv(cwd_slash('stacking_v3_test.csv'), index_col=0)
# correct class 17 for LB613
# test_scores_LB613 = pd.read_csv(
# './working/__613__190104-001629__P1T500_/test_aggregated_prediction.csv', index_col=0
# )
# test_scores_LB613['17'] = test_scores['17']
# test_scores = test_scores_LB613
# test_scores = pd.read_csv('tmp/yuanhao.csv', index_col=0)
submission_df = pd.read_csv('data/sample_submission.csv', index_col=0)
test_scores = test_scores.loc[submission_df.index]
def get_order(col):
fixed_n_samples = class_distrib.loc[int(col.name), 'LB613']
if not np.isnan(fixed_n_samples):
n_samples = fixed_n_samples
else:
n_samples = class_distrib.loc[int(col.name), 'expected_n_samples_in_test'] * 1.2
return np.where(np.argsort(np.argsort(-col)) >= n_samples, 0, 1)
submission_df['Predicted'] = test_scores.apply(get_order).apply(vec_to_str, axis=1)
submission_df.to_csv(cwd_slash('submission_top_n_stacking_v3.csv'))
# submission_df.to_csv('tmp/yuanhao_submission.csv')
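# Small illustration (toy values, not pipeline data) of the double-argsort rank
# trick used in get_order above: np.argsort(np.argsort(-col)) gives rank 0 to the
# largest score, so entries ranked >= n_samples fall outside the top n and become 0.
# _scores = np.array([0.1, 0.9, 0.5])
# np.argsort(np.argsort(-_scores))                      # -> array([2, 0, 1])
# np.where(np.argsort(np.argsort(-_scores)) >= 2, 0, 1) # -> array([0, 1, 1])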
def cut_score_for_valid(config):
info('cut_score_for_valid()')
cwd_slash = gen_cwd_slash(config)
threshold_df = pd.read_csv(cwd_slash('calibrated_threshold_17_corrected.csv'), index_col=0)
thresholds = threshold_df['best_threshold']
valid_scores = pd.read_csv(cwd_slash('valid_aggregated_prediction_17_corrected.csv'), index_col=0)
submission_df = pd.read_csv(cwd_slash('valid_anno.csv'))
valid_scores = valid_scores.loc[submission_df['Id']]
pick_mat = valid_scores.values > [thresholds]
preds = [vec_to_str(row) for row in pick_mat]
submission_df['Predicted'] = preds
submission_df.to_csv(cwd_slash('valid_anno_predicted.csv'), index=False)
def cut_score_for_test(config):
info('cut_score_for_test()')
cwd_slash = gen_cwd_slash(config)
threshold_df = pd.read_csv(cwd_slash('calibrated_threshold.csv'), index_col=0)
thresholds = threshold_df['best_threshold']
# thresholds = threshold_df['best_threshold'] * 0.4
test_scores = pd.read_csv(cwd_slash('test_aggregated_prediction.csv'), index_col=0)
submission_df = pd.read_csv('data/sample_submission.csv')
test_scores = test_scores.loc[submission_df['Id']]
pick_mat = test_scores.values > [thresholds]
def get_order(col, n_samples):
return np.where(np.argsort(np.argsort(-col)) >= n_samples, 0, 1)
for class_id in test_scores:
i_class = int(class_id)
manual_top_n = threshold_df.loc[i_class, 'manual_top_n']
if not np.isnan(manual_top_n):
debug(f"manually set {class_id} to pick the top {manual_top_n}")
pick_vec = get_order(test_scores[class_id], manual_top_n)
pick_mat[:, i_class] = pick_vec
preds = [vec_to_str(row) for row in pick_mat]
submission_df['Predicted'] = preds
submission_df.to_csv(cwd_slash('submission.csv'), index=False)
def compare_with_best_submssion(config):
info('compare_with_best_submssion()')
cwd_slash = gen_cwd_slash(config)
best_submission = pd.read_csv(cwd_slash('submission_587.csv'), index_col=0)
current_submission = pd.read_csv(cwd_slash('test_predicted_corrected.csv'), index_col=0)
current_submission['best'] = best_submission['Predicted']
debug(f"index all equal = {np.all(current_submission.index.values == best_submission.index.values)}")
diff = current_submission.query('Predicted != best')
# DEBUG BEGIN
plot_imgs(
config,
diff.loc[[10 in (str_to_labels(row['Predicted']) + str_to_labels(row['best'])) for i, row in diff.iterrows()]],
folder='data/test_minimaps',
extension='jpg',
channel='green',
)
# DEBUG END
# debug(f"diff =\n{diff}")
save_path = './tmp/diff.csv'
diff.to_csv(save_path)
debug(f"saved to {save_path}")
def show_score_details(config, id_='94c0f350-bada-11e8-b2b9-ac1f6b6435d0'):
info('show_score_details()')
cwd_slash = gen_cwd_slash(config)
windowed_anno = pd.read_csv('./data/test_windowed.csv')
scores_mat = np.load(cwd_slash('test_windowed_scores.npy'))
idxs = windowed_anno.loc[windowed_anno['source_img_id'] == id_].index.values
print(pd.DataFrame(np.round(scores_mat[idxs, :], 3), index=windowed_anno.loc[idxs]['img_id']))
def aggregate_prediction_for_valid(config):
info('aggregate_prediction_for_valid()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('valid_windowed_anno_predicted.csv'))
score_cols = [str(class_id) for class_id in config['class_ids']]
anno_agg = anno.groupby('source_img_id')[score_cols].agg([np.mean, np.max])
result_df = pd.DataFrame(index=anno_agg.index)
for score_col in score_cols:
result_df[score_col] = (anno_agg[score_col, 'mean'] + anno_agg[score_col, 'amax']) / 2
# result_df[score_col] = anno_agg[score_col, 'mean']
print(result_df.head())
save_path = cwd_slash('valid_aggregated_prediction.csv')
result_df.to_csv(save_path)
debug(f"saved to {save_path}")
def aggregate_prediction_for_test(config):
info('aggregate_prediction_for_test()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('test_windowed_anno_predicted.csv'))
score_cols = [str(class_id) for class_id in config['class_ids']]
anno_agg = anno.groupby('source_img_id')[score_cols].agg([np.mean, np.max])
result_df = pd.DataFrame(index=anno_agg.index)
for score_col in score_cols:
result_df[score_col] = (anno_agg[score_col, 'mean'] + anno_agg[score_col, 'amax']) / 2
# result_df[score_col] = anno_agg[score_col, 'mean']
print(result_df.head())
save_path = cwd_slash('test_aggregated_prediction.csv')
result_df.to_csv(save_path)
debug(f"saved to {save_path}")
def calibrate_thresholds(config, epsilon=1e-7):
info('calibrate_thresholds()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(config['path_to_valid_anno_cache'], index_col=0)
import hydra
import pandas as pd
from sklearn import preprocessing
import os
from pathlib import Path
def handle_columns(cfg, df, median_dict, label_encoder, is_train=False):
# Add column for missing values
if cfg.data.add_missing_col_rep_income:
df['isNaN_rep_income'] = (df['rep_income'].isnull()).astype(int)
if cfg.data.add_missing_col_uti_card_50plus_pct:
df['isNaN_uti_card_50plus_pct'] = (df['uti_card_50plus_pct'].isnull()).astype(int)
# Replace missing values with median
df['rep_income'] = df['rep_income'].fillna(median_dict['rep_income'])
df['uti_card_50plus_pct'] = df['uti_card_50plus_pct'].fillna(median_dict['uti_card_50plus_pct'])
# Add 'isOld' col
if cfg.data.add_isOld_col:
df['isOld'] = (df['credit_age'] > cfg.data.isOld_value).astype(int)
# Remove white space from "auto_open_ 36_month_num"
df = df.rename(columns={"auto_open_ 36_month_num": "auto_open_36_month_num"})
# One-hot encode State column
df = pd.get_dummies(df, prefix='State', columns=['States'])
# Label 'Default_ind' using LabelEncoder
df.loc[:, 'Default_ind'] = label_encoder.transform(df['Default_ind'].values)
# Balance the training data (training split only)
if is_train:
default_ind_1 = df[df['Default_ind'] == 1]
default_ind_0 = df[df['Default_ind'] == 0].sample(frac=1).reset_index(drop=True)
default_ind_0 = default_ind_0.iloc[:len(default_ind_1)]
df = pd.concat([default_ind_0, default_ind_1])
df = df.sample(frac=1).reset_index(drop=True)
return df
@hydra.main(config_path='config', config_name='config')
def get_data(cfg):
orig_cwd = hydra.utils.get_original_cwd()
# Check if processed data already exists. If 'yes' then return it,
# else, preprocess the data and save it
if cfg.data.processed_data_exists:
path = Path(f'{orig_cwd}/{cfg.data.save_path}')
train_df = pd.read_csv(path/'train.csv')
valid_df = pd.read_csv(path/'valid.csv')
test_df = pd.read_csv(path/'test.csv')
return train_df, valid_df, test_df
else:
path = Path(f'{orig_cwd}/{cfg.data.path}')
train_df = pd.read_csv(path/'train.csv')
valid_df = pd.read_csv(path/'valid.csv')
test_df = pd.read_csv(path/'test.csv')
import module
import os
import librosa
import soundfile as sf
import numpy as np
import glob
import pandas as pd
def extract_features():
# path to dataset containing 10 subdirectories of .ogg files
sub_dirs = os.listdir('data')
sub_dirs.sort()
features_list = []
for label, sub_dir in enumerate(sub_dirs):
for file_name in glob.glob(os.path.join('data',sub_dir,"*.ogg")):
print("Extracting file ", file_name)
try:
mfccs = module.get_features(file_name)
except Exception as e:
print("Extraction error")
continue
features_list.append([mfccs,label, sub_dir])
features_df = pd.DataFrame(features_list, columns=['feature', 'class_label', 'Directory'])
"""
Developer : <NAME>
Description: Based on the battery sensor data, the charger plug-in time and plug-in duration are extracted on a daily basis.
"""
#Importing the required libraries.
import collections as col
import functools
from collections import Counter
import pandas as pd
import FeatureExtraction.CommonFunctions.converters as converters
from FeatureExtraction.CommonFunctions import dataprocessing_helper
def TakeMostProbableTimeInStudy(study_values,day_values):
"""
Method to get most probable time based on give data.
Sometimes it is possible that partcipant can charge the mobile multiple times in a day,
in such cases we consider the most probable time of corresponding participant
occurred in the entire study period.
:param StudyValues: study charge time values
:param DayValues: charge time values for a given day
:return:
"""
# if there is only one value for the day, return it (only one charger plug-in time that day)
if day_values.count() == 1:
return day_values
# more than one record found, so fall back to the charging times from the entire study
else:
# count the occurrences of each charging time across the study
counter = Counter(study_values)
return functools.reduce(lambda max_key,current_key: max_key if counter[max_key]>counter[current_key] else current_key, study_values)
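# Illustrative behaviour (assumed integer-encoded times): with study-wide values
# [2200, 700, 2200] and more than one record today, the reduce above returns the
# study-wide mode.
# _c = Counter([2200, 700, 2200])
# functools.reduce(lambda m, k: m if _c[m] > _c[k] else k, [2200, 700, 2200])  # -> 2200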
def get_charger_plugintime_daily(file):
"""
Method to compute the battery charger plug in time
:param file:
:return data frame:
"""
#read the data in to a dataframe
df= pd.read_csv(file)
#splitting datetime in to date and time columns
df['Date'], df['Time'] = zip(*df['start_time'].map(lambda x: x.split(' ')))
#removing rows with battery plugged status as o which is unplugged and converting the time to Integer for easy caliculations
df= df[df.plugged !=0]
df['Time'] =df['Time'].apply(converters.ConvertTime)
df['Time'] =df['Time'].apply(converters.ConvertToInt)
# get all plug-in times for each participant over the entire 30-day study
tempdf = df
tempgrouping = tempdf.groupby(['user_id'])
batterychargeTimePerStudy= [(key,col.Counter(converters.ConvertToIntList(value['Time']))) for (key, value) in tempgrouping.__iter__()]
batterychargeTimePerStudydf= pd.DataFrame(batterychargeTimePerStudy,columns=['ID','Values'])
#grouping by date and userid
grouping = df.groupby(['user_id','Date'])
# get the charging time for each day, taking the most probable time in the study when there is more than one record
batterychargeTime_perDay= [(key[0],key[1],TakeMostProbableTimeInStudy(batterychargeTimePerStudydf[batterychargeTimePerStudydf.ID ==key[0]],value['Time'])) for (key,value) in grouping.__iter__()]
outputdf = pd.DataFrame(batterychargeTime_perDay, columns=['ID', 'Date', 'CharginTimeDaily'])
import re
import pandas as pd
import numpy as np
from gensim import corpora, models, similarities
from difflib import SequenceMatcher
from build_tfidf import split
def ratio(w1, w2):
'''
Calculate the matching ratio between 2 words.
Only account for word pairs with at least 90% similarity
'''
m = SequenceMatcher(None, w1, w2)
r = m.ratio()
if r < 0.9: r = 0.0
return r
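# Rough examples of how the 0.9 cutoff behaves (values from difflib's 2*M/T formula):
# ratio('wood', 'woods')     # ~0.889 before the cutoff, so the wrapper returns 0.0
# ratio('hammer', 'hammers') # ~0.923, kept as-is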
def build_features(data, tfidf, dictionary):
'''
Generate features:
1. Cosine similarity between tf-idf vectors of query vs. title
2. Cosine similarity between tf-idf vectors of query vs. description
3. Cosine similarity between tf-idf vectors of query vs. attribute text
4. Sum of word match ratios between query vs. title
5. Sum of word match ratios between query vs. description
6. Sum of word match ratios between query vs. attribute text
7. Query word count
'''
result = []
for loc in xrange(len(data)):
rowdata = data.loc[loc, ["product_title", "product_description", "attr_value", "search_term"]]
rowbow = [[str(text)] if isinstance(text, float) else split(text) for text in rowdata]
# query match level
titleMatch = descMatch = attrMatch = 0
for q in rowbow[3]:
titleMatch = titleMatch + np.sum(map(lambda w: ratio(q, w), rowbow[0]))
descMatch = descMatch + np.sum(map(lambda w: ratio(q, w), rowbow[1]))
attrMatch = attrMatch + np.sum(map(lambda w: ratio(q, w), rowbow[2]))
# get tfidf vectors
rowdata = [tfidf[dictionary.doc2bow(text)] for text in rowbow]
# prepare to get similarities
index = similarities.SparseMatrixSimilarity(rowdata[:3], num_features=len(dictionary))
# append everything to the result
result.append(np.concatenate((index[rowdata[3]], [titleMatch, descMatch, attrMatch, len(rowbow[3])]), axis=0).tolist())
# end loop
return np.array(result)
def main():
# load data
df_desc = pd.read_csv('data/product_descriptions.csv', encoding="ISO-8859-1")
df_attr = pd.read_csv('data/attributes_combined.csv', encoding="ISO-8859-1")
df_train = pd.read_csv('data/train.csv', encoding="ISO-8859-1")
df_train = pd.merge(df_train, df_desc, how='left', on='product_uid')
df_train = pd.merge(df_train, df_attr, how='left', on='product_uid')
df_test = pd.read_csv('data/test.csv', encoding="ISO-8859-1")
df_test = pd.merge(df_test, df_desc, how='left', on='product_uid')
df_test = pd.merge(df_test, df_attr, how='left', on='product_uid')
# load tfidf model
dictionary = corpora.Dictionary.load('homedepot.dict')
corpus = corpora.MmCorpus('homedepot.mm')
tfidf = models.TfidfModel.load('homedepot.tfidf')
# build features
trainData = build_features(df_train, tfidf, dictionary)
testData = build_features(df_test, tfidf, dictionary)
# save to csv
df = pd.DataFrame(trainData, columns=['qt', 'qd', 'qa', 'mt', 'md', 'ma', 'ql'])
df.to_csv('data/train_features.csv', index=False)
df = pd.DataFrame(testData, columns=['qt', 'qd', 'qa', 'mt', 'md', 'ma', 'ql'])
#Author: <NAME>
#Email: <EMAIL>
#Script uses a random forest classifier to predict loan defaults within the lending Club dataset
import os, errno, time, smtplib, ssl, pickle
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
import sklearn.metrics as metrics
#Model Specific Packages
from sklearn.ensemble import RandomForestClassifier
#%%Global model output settings
#######################################################################################################################
#######################################################################################################################
model_name = 'Random Forest' #Name used in chart titles
save_path = r'C:\Users\mhr19\Dropbox\CODE\CONSUMER_DEBT\CL_RFC' #Directory for saving model output
scorer = 'roc_auc' #Scoring metric for Grid Search
start_time = time.time() #start time for execution timer
#######################################################################################################################
#######################################################################################################################
#%%Load Train/Val/Test Data Files
#######################################################################################################################
#######################################################################################################################
#Training CSV files
X_train = pd.read_csv(r'C:\Users\mhr19\Dropbox\CODE\CONSUMER_DEBT\DATA\TRAIN\loans_SMOTE_X_train_all.CSV')
y_train = pd.read_csv(r'C:\Users\mhr19\Dropbox\CODE\CONSUMER_DEBT\DATA\TRAIN\loans_SMOTE_y_train_all.CSV')
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>, <NAME>, <NAME>, <NAME>
"""Functions to generate derived time features useful
in forecasting, such as growth, seasonality, holidays.
"""
import inspect
import math
import warnings
from datetime import datetime
import fbprophet.hdays as fbholidays
import holidays
import numpy as np
import pandas as pd
from scipy.special import expit
from greykite.common import constants as cst
def convert_date_to_continuous_time(dt):
"""Converts date to continuous time. Each year is one unit.
Parameters
----------
dt : datetime object
the date to convert
Returns
-------
conti_date : `float`
the date represented in years
"""
year_length = datetime(dt.year, 12, 31).timetuple().tm_yday
tt = dt.timetuple()
return (dt.year +
(tt.tm_yday - 1
+ dt.hour / 24
+ dt.minute / (24 * 60)
+ dt.second / (24 * 3600)) / float(year_length))
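# Example (illustrative): noon on 2018-07-01 is day 182 of a 365-day year, so
# convert_date_to_continuous_time(datetime(2018, 7, 1, 12)) evaluates to
# 2018 + (181 + 0.5) / 365, i.e. roughly 2018.497.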
def get_default_origin_for_time_vars(df, time_col):
"""Sets default value for origin_for_time_vars
Parameters
----------
df : `pandas.DataFrame`
Training data. A data frame which includes the timestamp and value columns
time_col : `str`
The column name in `df` representing time for the time series data.
Returns
-------
dt_continuous_time : `float`
The time origin used to create continuous variables for time
"""
date = pd.to_datetime(df[time_col][0])
return convert_date_to_continuous_time(date)
def build_time_features_df(dt, conti_year_origin):
"""This function gets a datetime-like vector and creates new columns containing temporal
features useful for time series analysis and forecasting e.g. year, week of year, etc.
Parameters
----------
dt : array-like (1-dimensional)
A vector of datetime-like values
conti_year_origin : float
The origin used for creating continuous time.
Returns
-------
time_features_df : `pandas.DataFrame`
Dataframe with the following time features.
* "datetime": `datetime.datetime` object, a combination of date and a time
* "date": `datetime.date` object, date with the format (year, month, day)
* "year": integer, year of the date e.g. 2018
* "year_length": integer, number of days in the year e.g. 365 or 366
* "quarter": integer, quarter of the date, 1, 2, 3, 4
* "quarter_start": `pandas.DatetimeIndex`, date of beginning of the current quarter
* "quarter_length": integer, number of days in the quarter, 90/91 for Q1, 91 for Q2, 92 for Q3 and Q4
* "month": integer, month of the year, January=1, February=2, ..., December=12
* "month_length": integer, number of days in the month, 28/ 29/ 30/ 31
* "woy": integer, ISO 8601 week of the year where a week starts from Monday, 1, 2, ..., 53
* "doy": integer, ordinal day of the year, 1, 2, ..., year_length
* "doq": integer, ordinal day of the quarter, 1, 2, ..., quarter_length
* "dom": integer, ordinal day of the month, 1, 2, ..., month_length
* "dow": integer, day of the week, Monday=1, Tuesday=2, ..., Sunday=7
* "str_dow": string, day of the week as a string e.g. "1-Mon", "2-Tue", ..., "7-Sun"
* "str_doy": string, day of the year e.g. "2020-03-20" for March 20, 2020
* "hour": integer, discrete hours of the datetime, 0, 1, ..., 23
* "minute": integer, minutes of the datetime, 0, 1, ..., 59
* "second": integer, seconds of the datetime, 0, 1, ..., 3599
* "year_month": string, (year, month) e.g. "2020-03" for March 2020
* "year_woy": string, (year, week of year) e.g. "2020_42" for 42nd week of 2020
* "month_dom": string, (month, day of month) e.g. "02/20" for February 20th
* "year_woy_dow": string, (year, week of year, day of week) e.g. "2020_03_6" for Saturday of 3rd week in 2020
* "woy_dow": string, (week of year, day of week) e.g. "03_6" for Saturday of 3rd week
* "dow_hr": string, (day of week, hour) e.g. "4_09" for 9am on Thursday
* "dow_hr_min": string, (day of week, hour, minute) e.g. "4_09_10" for 9:10am on Thursday
* "tod": float, time of day, continuous, 0.0 to 24.0
* "tow": float, time of week, continuous, 0.0 to 7.0
* "tom": float, standardized time of month, continuous, 0.0 to 1.0
* "toq": float, time of quarter, continuous, 0.0 to 1.0
* "toy": float, standardized time of year, continuous, 0.0 to 1.0
* "conti_year": float, year in continuous time, eg 2018.5 means middle of the year 2018
* "is_weekend": boolean, weekend indicator, True for weekend, else False
* "dow_grouped": string, Monday-Thursday=1234-MTuWTh, Friday=5-Fri, Saturday=6-Sat, Sunday=7-Sun
* "ct1": float, linear growth based on conti_year_origin, -infinity to infinity
* "ct2": float, signed quadratic growth, -infinity to infinity
* "ct3": float, signed cubic growth, -infinity to infinity
* "ct_sqrt": float, signed square root growth, -infinity to infinity
* "ct_root3": float, signed cubic root growth, -infinity to infinity
"""
dt = pd.DatetimeIndex(dt)
if len(dt) == 0:
raise ValueError("Length of dt cannot be zero.")
# basic time features
date = dt.date
year = dt.year
year_length = (365.0 + dt.is_leap_year)
quarter = dt.quarter
month = dt.month
month_length = dt.days_in_month
# finds first day of quarter
quarter_start = pd.DatetimeIndex(
dt.year.map(str) + "-" + (3 * quarter - 2).map(int).map(str) + "-01")
next_quarter_start = dt + pd.tseries.offsets.QuarterBegin(startingMonth=1)
quarter_length = (next_quarter_start - quarter_start).days
# finds offset from first day of quarter (rounds down to nearest day)
doq = ((dt - quarter_start) / pd.to_timedelta("1D") + 1).astype(int)
# week of year, "woy", follows ISO 8601:
# - Week 01 is the week with the year's first Thursday in it.
# - A week begins with Monday and ends with Sunday.
# So the week number of the week that overlaps both years, is 1, 52, or 53,
# depending on whether it has more days in the previous year or new year.
# - e.g. Jan 1st, 2018 is Monday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 1, 2]
# - e.g. Jan 1st, 2019 is Tuesday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 2, 2]
# - e.g. Jan 1st, 2020 is Wednesday. woy of first 8 days = [1, 1, 1, 1, 1, 2, 2, 2]
# - e.g. Jan 1st, 2015 is Thursday. woy of first 8 days = [1, 1, 1, 1, 2, 2, 2, 2]
# - e.g. Jan 1st, 2021 is Friday. woy of first 8 days = [53, 53, 53, 1, 1, 1, 1, 1]
# - e.g. Jan 1st, 2022 is Saturday. woy of first 8 days = [52, 52, 1, 1, 1, 1, 1, 1]
# - e.g. Jan 1st, 2023 is Sunday. woy of first 8 days = [52, 1, 1, 1, 1, 1, 1, 1]
woy = dt.strftime("%V").astype(int)
doy = dt.dayofyear
dom = dt.day
dow = dt.strftime("%u").astype(int)
str_dow = dt.strftime("%u-%a") # e.g. 1-Mon, 2-Tue, ..., 7-Sun
hour = dt.hour
minute = dt.minute
second = dt.second
# grouped time feature
str_doy = dt.strftime("%Y-%m-%d") # e.g. 2020-03-20 for March 20, 2020
year_month = dt.strftime("%Y-%m") # e.g. 2020-03 for March 2020
month_dom = dt.strftime("%m/%d") # e.g. 02/20 for February 20th
year_woy = dt.strftime("%Y_%V") # e.g. 2020_42 for 42nd week of 2020
year_woy_dow = dt.strftime("%Y_%V_%u") # e.g. 2020_03_6 for Saturday of 3rd week in 2020
woy_dow = dt.strftime("%W_%u") # e.g. 03_6 for Saturday of 3rd week
dow_hr = dt.strftime("%u_%H") # e.g. 4_09 for 9am on Thursday
dow_hr_min = dt.strftime("%u_%H_%M") # e.g. 4_09_10 for 9:10am on Thursday
# derived time features
tod = hour + (minute / 60.0) + (second / 3600.0)
tow = dow - 1 + (tod / 24.0)
tom = (dom - 1 + (tod / 24.0)) / month_length
toq = (doq - 1 + (tod / 24.0)) / quarter_length
# time of year, continuous, 0.0 to 1.0. e.g. Jan 1, 12 am = 0/365, Jan 2, 12 am = 1/365, ...
# To handle leap years, Feb 28 = 58/365 - 59/365, Feb 29 = 59/365, Mar 1 = 59/365 - 60/365
# offset term is nonzero only in leap years
# doy_offset reduces doy by 1 from from Mar 1st (doy > 60)
doy_offset = (year_length == 366) * 1.0 * (doy > 60)
# tod_offset sets tod to 0 on Feb 29th (doy == 60)
tod_offset = 1 - (year_length == 366) * 1.0 * (doy == 60)
toy = (doy - 1 - doy_offset + (tod / 24.0) * tod_offset) / 365.0
# year of date in continuous time, eg 2018.5 means middle of year 2018
# this is useful for modeling features that do not care about leap year e.g. environmental variables
conti_year = year + (doy - 1 + (tod / 24.0)) / year_length
is_weekend = pd.Series(dow).apply(lambda x: x in [6, 7]).values # weekend indicator
# categorical var with levels (Mon-Thu, Fri, Sat, Sun), could help when training data are sparse.
dow_grouped = pd.Series(str_dow).apply(lambda x: "1234-MTuWTh" if (x in ["1-Mon", "2-Tue", "3-Wed", "4-Thu"]) else x).values
# growth terms
ct1 = conti_year - conti_year_origin
ct2 = signed_pow(ct1, 2)
ct3 = signed_pow(ct1, 3)
ct_sqrt = signed_pow(ct1, 1/2)
ct_root3 = signed_pow(ct1, 1/3)
# All keys must be added to constants.
features_dict = {
"datetime": dt,
"date": date,
"year": year,
"year_length": year_length,
"quarter": quarter,
"quarter_start": quarter_start,
"quarter_length": quarter_length,
"month": month,
"month_length": month_length,
"woy": woy,
"doy": doy,
"doq": doq,
"dom": dom,
"dow": dow,
"str_dow": str_dow,
"str_doy": str_doy,
"hour": hour,
"minute": minute,
"second": second,
"year_month": year_month,
"year_woy": year_woy,
"month_dom": month_dom,
"year_woy_dow": year_woy_dow,
"woy_dow": woy_dow,
"dow_hr": dow_hr,
"dow_hr_min": dow_hr_min,
"tod": tod,
"tow": tow,
"tom": tom,
"toq": toq,
"toy": toy,
"conti_year": conti_year,
"is_weekend": is_weekend,
"dow_grouped": dow_grouped,
"ct1": ct1,
"ct2": ct2,
"ct3": ct3,
"ct_sqrt": ct_sqrt,
"ct_root3": ct_root3,
}
df = pd.DataFrame(features_dict)
return df
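# A minimal usage sketch (not part of the library API; assumes `pd` is pandas, as above):
# builds the time feature frame around a leap day to show the "toy" and "ct1" conventions.
def _example_build_time_features():
    dt = pd.date_range(start="2020-02-28", periods=4, freq="D")  # spans Feb 29 of a leap year
    time_df = build_time_features_df(dt, conti_year_origin=2020.0)
    # "ct1" is 0.0 at the origin (start of 2020) and grows in fractional years,
    # while "toy" skips Feb 29 so that Mar 1 aligns across leap and non-leap years.
    return time_df[["datetime", "doy", "toy", "ct1"]]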
def add_time_features_df(df, time_col, conti_year_origin):
"""Adds a time feature data frame to a data frame
:param df: the input data frame
:param time_col: the name of the time column of interest
:param conti_year_origin: the origin of time for the continuous time variable
:return: the same data frame (df) augmented with new columns
"""
df = df.reset_index(drop=True)
time_df = build_time_features_df(
dt=df[time_col],
conti_year_origin=conti_year_origin)
time_df = time_df.reset_index(drop=True)
return pd.concat([df, time_df], axis=1)
def get_holidays(countries, year_start, year_end):
"""This function extracts a holiday data frame for the period of interest
[year_start to year_end] for the given countries.
This is done using the holidays libraries in pypi:fbprophet and pypi:holidays
Implementation resembles that of `~fbprophet.make_holidays.make_holidays_df`
Parameters
----------
countries : `list` [`str`]
countries for which we need holidays
year_start : `int`
first year of interest, inclusive
year_end : `int`
last year of interest, inclusive
Returns
-------
holiday_df_dict : `dict` [`str`, `pandas.DataFrame`]
- key: country name
- value: data frame with holidays for that country
Each data frame has two columns: EVENT_DF_DATE_COL, EVENT_DF_LABEL_COL
"""
country_holiday_dict = {}
year_list = list(range(year_start, year_end + 1))
for country in countries:
try:
# Fetch the holidays from fbprophet holiday set
# Suppress the following warning for India:
# "We only support Diwali and Holi holidays from 2010 to 2025"
if country in ["India", "IN"]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
country_holidays = getattr(fbholidays, country)(years=year_list)
else:
country_holidays = getattr(fbholidays, country)(years=year_list)
except AttributeError:
# Fetch the holidays from pypi:holidays set
try:
country_holidays = getattr(holidays, country)(years=year_list)
except AttributeError:
raise AttributeError(f"Holidays in {country} are not currently supported!")
country_df = pd.DataFrame({
cst.EVENT_DF_DATE_COL: list(country_holidays.keys()),
cst.EVENT_DF_LABEL_COL: list(country_holidays.values())})
country_df[cst.EVENT_DF_DATE_COL] = pd.to_datetime(country_df[cst.EVENT_DF_DATE_COL])
country_holiday_dict[country] = country_df
return country_holiday_dict
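# A usage sketch (assumes the pypi:holidays / fbprophet country classes are available,
# consistent with the imports used above): fetch US holidays for 2020-2021.
def _example_get_holidays():
    holiday_df_dict = get_holidays(countries=["US"], year_start=2020, year_end=2021)
    us_df = holiday_df_dict["US"]  # columns: cst.EVENT_DF_DATE_COL, cst.EVENT_DF_LABEL_COL
    return us_df.sort_values(cst.EVENT_DF_DATE_COL)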
def get_available_holiday_lookup_countries(countries=None):
"""Returns list of available countries for modeling holidays
:param countries: List[str]
only look for available countries in this set
:return: List[str]
list of available countries for modeling holidays
"""
fb_countries = [
name for name, obj in inspect.getmembers(fbholidays)
if inspect.isclass(obj) and obj.__module__ == fbholidays.__name__]
holidays_countries = [
name for name, obj in inspect.getmembers(holidays)
if inspect.isclass(obj) and obj.__module__ == holidays.__name__]
all_countries = set(fb_countries + holidays_countries)
if countries is not None:
countries = set(countries)
found_countries = all_countries.intersection(countries)
else:
found_countries = all_countries
found_countries.discard("HolidayBase") # edge case, remove if found
return sorted(list(found_countries))
def get_available_holidays_in_countries(
countries,
year_start,
year_end):
"""Returns a dictionary mapping each country to its holidays
between the years specified.
:param countries: List[str]
countries for which we need holidays
:param year_start: int
first year of interest
:param year_end: int
last year of interest
:return: Dict[str, List[str]]
key: country name
value: list of holidays in that country between [year_start, year_end]
"""
country_holiday_dict = get_holidays(countries, year_start, year_end)
country_holiday_list = {country: list(sorted(set(df[cst.EVENT_DF_LABEL_COL].values)))
for country, df in country_holiday_dict.items()}
return country_holiday_list
def get_available_holidays_across_countries(
countries,
year_start,
year_end):
"""Returns a list of holidays that occur any of the countries
between the years specified.
:param countries: List[str]
countries for which we need holidays
:param year_start: int
first year of interest
:param year_end: int
last year of interest
:return: List[str]
names of holidays in any of the countries between [year_start, year_end]
"""
country_holiday_list = get_available_holidays_in_countries(
countries=countries,
year_start=year_start,
year_end=year_end)
holiday_across_countries = {
holiday for country, holiday_list in country_holiday_list.items()
for holiday in holiday_list}
return list(sorted(holiday_across_countries))
def add_daily_events(
df,
event_df_dict,
date_col=cst.EVENT_DF_DATE_COL,
regular_day_label=cst.EVENT_DEFAULT):
"""For each key of event_df_dict, it adds a new column to a data frame (df)
with a date column (date_col).
Each new column will represent the events given for that key.
Notes
-----
As a side effect, the columns in ``event_df_dict`` are renamed.
Parameters
----------
df : `pandas.DataFrame`
The data frame which has a date column.
event_df_dict : `dict` [`str`, `pandas.DataFrame`]
A dictionary of data frames, each representing events data
for the corresponding key.
Values are DataFrames with two columns:
- The first column contains the date. Must be at the same
frequency as ``df[date_col]`` for proper join. Must be in a
format recognized by `pandas.to_datetime`.
- The second column contains the event label for each date
date_col : `str`
Column name in ``df`` that contains the dates for joining against
the events in ``event_df_dict``.
regular_day_label : `str`
The label used for regular days which are not "events".
Returns
-------
df_daily_events : `pandas.DataFrame`
An augmented data frame version of df with new label columns --
one for each key of ``event_df_dict``.
"""
df[date_col] = pd.to_datetime(df[date_col])
for label, event_df in event_df_dict.items():
event_df = event_df.copy()
new_col = f"{cst.EVENT_PREFIX}_{label}"
event_df.columns = [date_col, new_col]
event_df[date_col] = pd.to_datetime(event_df[date_col])
df = df.merge(event_df, on=date_col, how="left")
df[new_col] = df[new_col].fillna(regular_day_label)
return df
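# A usage sketch: label a daily frame with one events frame. The column name "ts" and the
# dictionary key "US" are illustrative choices, not fixed by the API.
def _example_add_daily_events():
    df = pd.DataFrame({"ts": pd.date_range("2019-12-24", periods=4, freq="D"), "y": range(4)})
    event_df = pd.DataFrame({
        cst.EVENT_DF_DATE_COL: ["2019-12-25"],
        cst.EVENT_DF_LABEL_COL: ["Christmas Day"]})
    labeled = add_daily_events(df, event_df_dict={"US": event_df}, date_col="ts")
    # The new column f"{cst.EVENT_PREFIX}_US" equals "Christmas Day" on 2019-12-25
    # and the regular_day_label on the other days.
    return labeled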
def add_event_window(
df,
time_col,
label_col,
time_delta="1D",
pre_num=1,
post_num=1,
events_name=""):
"""For a data frame of events with a time_col and label_col
it adds shifted events
prior and after the given events
For example if the event data frame includes the row
'2019-12-25, Christmas'
the function will produce dataframes with the events:
'2019-12-24, Christmas' and '2019-12-26, Christmas'
if pre_num and post_num are 1 or more.
:param df: pd.DataFrame
the events data frame with two columns 'time_col' and 'label_col'
:param time_col: str
The column with the timestamp of the events.
This can be daily but does not have to be.
:param label_col: str
the column with labels for the events
:param time_delta: str
the amount of the shift for each unit specified by a string
e.g. "1D" stands for one day delta
:param pre_num: int
the number of events to be added prior to the given event for each event in df
:param post_num: int
the number of events to be added after the given event for each event in df
:param events_name: str
for each shift, we generate a new data frame
and those data frames will be stored in a dictionary with appropriate keys.
Each key starts with "events_name"
and is followed by one of:
"_minus_1", "_minus_2", "_plus_1", "_plus_2", ...
depending on pre_num and post_num
:return: dict[key: pd.Dataframe]
A dictionary of dataframes for each needed shift.
For example, if pre_num=2 and post_num=3,
then 2 + 3 = 5 data frames will be stored in the returned dictionary.
"""
df_dict = {}
pd_time_delta = pd.to_timedelta(time_delta)
for num in range(pre_num):
df0 = pd.DataFrame()
df0[time_col] = df[time_col] - (num + 1) * pd_time_delta
df0[label_col] = df[label_col]
df_dict[events_name + "_minus_" + f"{(num + 1):.0f}"] = df0
for num in range(post_num):
df0 = pd.DataFrame()
df0[time_col] = df[time_col] + (num + 1) * pd_time_delta
df0[label_col] = df[label_col]
df_dict[events_name + "_plus_" + f"{(num + 1):.0f}"] = df0
return df_dict
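# A usage sketch: build +/- 1 day "shoulder" events around Christmas. The keys of the
# returned dict are "xmas_minus_1" and "xmas_plus_1" for this choice of events_name.
def _example_add_event_window():
    event_df = pd.DataFrame({"date": pd.to_datetime(["2019-12-25"]), "event": ["Christmas"]})
    shifted = add_event_window(
        df=event_df, time_col="date", label_col="event",
        time_delta="1D", pre_num=1, post_num=1, events_name="xmas")
    # shifted["xmas_minus_1"]["date"] is 2019-12-24; shifted["xmas_plus_1"]["date"] is 2019-12-26
    return shifted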
def get_evenly_spaced_changepoints_values(
df,
continuous_time_col="ct1",
n_changepoints=2):
"""Partitions interval into n_changepoints + 1 segments,
placing a changepoint at left endpoint of each segment.
The left most segment doesn't get a changepoint.
Changepoints should be determined from training data.
:param df: pd.DataFrame
training dataset. contains continuous_time_col
:param continuous_time_col: str
name of continuous time column (e.g. conti_year, ct1)
:param n_changepoints: int
number of changepoints requested
:return: np.array
values of df[continuous_time_col] at the changepoints
"""
if not n_changepoints > 0:
raise ValueError("n_changepoints must be > 0")
n = df.shape[0]
n_steps = n_changepoints + 1
step_size = n / n_steps
indices = np.floor(np.arange(start=1, stop=n_steps) * step_size).astype(int)
return df[continuous_time_col][indices].values
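# A worked sketch: with 9 rows and n_changepoints=2, step_size is 9 / 3 = 3, so changepoints
# land at positions floor(1 * 3) = 3 and floor(2 * 3) = 6 of the training data.
def _example_evenly_spaced_changepoint_values():
    df = pd.DataFrame({"ct1": np.arange(9) / 10.0})  # 0.0, 0.1, ..., 0.8
    values = get_evenly_spaced_changepoints_values(df, continuous_time_col="ct1", n_changepoints=2)
    # values == array([0.3, 0.6])
    return values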
def get_evenly_spaced_changepoints_dates(
df,
time_col,
n_changepoints):
"""Partitions interval into n_changepoints + 1 segments,
placing a changepoint at left endpoint of each segment.
The left most segment doesn't get a changepoint.
Changepoints should be determined from training data.
:param df: pd.DataFrame
training dataset. contains continuous_time_col
:param time_col: str
name of time column
:param n_changepoints: int
number of changepoints requested
:return: pd.Series
values of df[time_col] at the changepoints
"""
if not n_changepoints >= 0:
raise ValueError("n_changepoints must be >= 0")
changepoint_indices = np.floor(np.arange(start=1, stop=n_changepoints + 1) * (df.shape[0] / (n_changepoints + 1)))
changepoint_indices = df.index[np.concatenate([[0], changepoint_indices.astype(int)])]
return df.loc[changepoint_indices, time_col]
def get_custom_changepoints_values(
df,
changepoint_dates,
time_col=cst.TIME_COL,
continuous_time_col="ct1"):
"""Returns the values of continuous_time_col at the
requested changepoint_dates.
:param df: pd.DataFrame
training dataset. contains continuous_time_col and time_col
:param changepoint_dates: Iterable[Union[int, float, str, datetime]]
Changepoint dates, interpreted by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset
:param time_col: str
The column name in `df` representing time for the time series data
The time column can be anything that can be parsed by pandas DatetimeIndex
:param continuous_time_col: str
name of continuous time column (e.g. conti_year, ct1)
:return: np.array
values of df[continuous_time_col] at the changepoints
"""
ts = pd.to_datetime(df[time_col])
changepoint_dates = pd.to_datetime(changepoint_dates)
# maps each changepoint to first date >= changepoint in the dataframe
# if there is no such date, the changepoint is dropped (it would not be useful anyway)
changepoint_ts = [ts[ts >= date].min() for date in changepoint_dates if any(ts >= date)]
indices = ts.isin(changepoint_ts)
changepoints = df[indices][continuous_time_col].values
if changepoints.shape[0] == 0:
changepoints = None
return changepoints
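# A usage sketch: each requested date is snapped to the first timestamp in the data on or
# after it; dates beyond the end of the data are dropped.
def _example_custom_changepoint_values():
    df = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2020-01-01", periods=5, freq="D"),
        "ct1": np.arange(5) / 365.0})
    values = get_custom_changepoints_values(df, changepoint_dates=["2020-01-03"])
    # values == array([2 / 365]), the "ct1" value at 2020-01-03
    return values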
def get_changepoint_string(changepoint_dates):
"""Gets proper formatted strings for changepoint dates.
The default format is "_%Y_%m_%d_%H". When necessary, it appends "_%M" or "_%M_%S".
Parameters
----------
changepoint_dates : `list`
List of changepoint dates, parsable by `pandas.to_datetime`.
Returns
-------
date_strings : `list` [`str`]
List of string formatted changepoint dates.
"""
changepoint_dates = list(pd.to_datetime(changepoint_dates))
time_format = "_%Y_%m_%d_%H"
if any([stamp.second != 0 for stamp in changepoint_dates]):
time_format += "_%M_%S"
elif any([stamp.minute != 0 for stamp in changepoint_dates]):
time_format += "_%M"
date_strings = [date.strftime(time_format) for date in changepoint_dates]
return date_strings
def get_changepoint_features(
df,
changepoint_values,
continuous_time_col="ct1",
growth_func=None,
changepoint_dates=None):
"""Returns features for growth terms with continuous time origins at
the changepoint_values (locations) specified
Generates a time series feature for each changepoint:
Let t = continuous_time value, c = changepoint value
Then the changepoint feature value at time point t is
`growth_func(t - c) * I(t >= c)`, where I is the indicator function
This represents growth as a function of time, where the time origin is
the changepoint
In the typical case where growth_func(0) = 0 (has origin at 0),
the total effect of the changepoints is continuous in time.
If `growth_func` is the identity function, and `continuous_time`
represents the year in continuous time, these terms form the basis for a
continuous, piecewise linear curve to the growth trend.
Fitting these terms with a linear model, the coefficients represent the slope
change at each changepoint
Intended usage
----------
To make predictions (on test set)
Allow growth term as a function of time to change at these points.
Parameters
----------
:param df: pd.Dataframe
The dataset to make predictions on. Contains column continuous_time_col.
:param changepoint_values: array-like
List of changepoint values (on same scale as df[continuous_time_col]).
Should be determined from training data
:param continuous_time_col: Optional[str]
Name of continuous time column in df
growth_func is applied to this column to generate growth term
If None, uses "ct1", linear growth
:param growth_func: Optional[callable]
Growth function for defining changepoints (scalar -> scalar).
If None, uses identity function to use continuous_time_col directly
as growth term
:param changepoint_dates: Optional[list]
List of change point dates, parsable by `pandas.to_datetime`.
:return: pd.DataFrame, shape (df.shape[0], len(changepoints))
Changepoint features, 0-indexed
"""
if continuous_time_col is None:
continuous_time_col = "ct1"
if growth_func is None:
def growth_func(x):
return x
if changepoint_dates is not None:
time_postfixes = get_changepoint_string(changepoint_dates)
else:
time_postfixes = [""] * len(changepoint_values)
changepoint_df = pd.DataFrame()
for i, changepoint in enumerate(changepoint_values):
time_feature = np.array(df[continuous_time_col]) - changepoint # shifted time column (t - c_i)
growth_term = np.array([growth_func(max(x, 0)) for x in time_feature]) # growth as a function of time
time_feature_ind = time_feature >= 0 # Indicator(t >= c_i), lets changepoint take effect starting at c_i
new_col = growth_term * time_feature_ind
new_changepoint = pd.Series(new_col, name=f"{cst.CHANGEPOINT_COL_PREFIX}{i}{time_postfixes[i]}")
changepoint_df = pd.concat([changepoint_df, new_changepoint], axis=1)
return changepoint_df
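# A worked sketch: one changepoint at ct1 = 0.5 with the default identity growth_func yields
# the hinge feature max(ct1 - 0.5, 0), i.e. zero before the changepoint and linear after it.
def _example_changepoint_features():
    df = pd.DataFrame({"ct1": [0.0, 0.25, 0.5, 0.75, 1.0]})
    features = get_changepoint_features(df, changepoint_values=[0.5], continuous_time_col="ct1")
    # A single column named f"{cst.CHANGEPOINT_COL_PREFIX}0" with values [0.0, 0.0, 0.0, 0.25, 0.5]
    return features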
def get_changepoint_values_from_config(
changepoints_dict,
time_features_df,
time_col=cst.TIME_COL):
"""Applies the changepoint method specified in `changepoints_dict` to return the changepoint values
:param changepoints_dict: Optional[Dict[str, any]]
Specifies the changepoint configuration.
"method": str
The method to locate changepoints. Valid options:
"uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
"custom". Places changepoints at the specified dates.
Additional keys to provide parameters for each particular method are described below.
"continuous_time_col": Optional[str]
Column to apply `growth_func` to, to generate changepoint features
Typically, this should match the growth term in the model
"growth_func": Optional[func]
Growth function (scalar -> scalar). Changepoint features are created
by applying `growth_func` to "continuous_time_col" with offsets.
If None, uses identity function to use `continuous_time_col` directly
as growth term
If changepoints_dict["method"] == "uniform", this other key is required:
"n_changepoints": int
number of changepoints to evenly space across training period
If changepoints_dict["method"] == "custom", this other key is required:
"dates": Iterable[Union[int, float, str, datetime]]
Changepoint dates. Must be parsable by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset.
:param time_features_df: pd.Dataframe
training dataset. contains column "continuous_time_col"
:param time_col: str
The column name in `time_features_df` representing time for the time series data
The time column can be anything that can be parsed by pandas DatetimeIndex
Used only in the "custom" method.
:return: np.array
values of df[continuous_time_col] at the changepoints
"""
changepoint_values = None
if changepoints_dict is not None:
valid_changepoint_methods = ["uniform", "custom"]
changepoint_method = changepoints_dict.get("method")
continuous_time_col = changepoints_dict.get("continuous_time_col")
if changepoint_method is None:
raise Exception("changepoint method must be specified")
if changepoint_method not in valid_changepoint_methods:
raise NotImplementedError(
f"changepoint method {changepoint_method} not recognized. "
f"Must be one of {valid_changepoint_methods}")
if changepoint_method == "uniform":
if changepoints_dict["n_changepoints"] > 0:
params = {"continuous_time_col": continuous_time_col} if continuous_time_col is not None else {}
changepoint_values = get_evenly_spaced_changepoints_values(
df=time_features_df,
n_changepoints=changepoints_dict["n_changepoints"],
**params)
elif changepoint_method == "custom":
params = {}
if time_col is not None:
params["time_col"] = time_col
if continuous_time_col is not None:
params["continuous_time_col"] = continuous_time_col
changepoint_values = get_custom_changepoints_values(
df=time_features_df,
changepoint_dates=changepoints_dict["dates"],
**params)
return changepoint_values
def get_changepoint_features_and_values_from_config(
df,
time_col,
changepoints_dict=None,
origin_for_time_vars=None):
"""Extracts changepoints from changepoint configuration and input data
:param df: pd.DataFrame
Training data. A data frame which includes the timestamp and value columns
:param time_col: str
The column name in `df` representing time for the time series data
The time column can be anything that can be parsed by pandas DatetimeIndex
:param changepoints_dict: Optional[Dict[str, any]]
Specifies the changepoint configuration.
"method": str
The method to locate changepoints. Valid options:
"uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
"custom". Places changepoints at the specified dates.
Additional keys to provide parameters for each particular method are described below.
"continuous_time_col": Optional[str]
Column to apply `growth_func` to, to generate changepoint features
Typically, this should match the growth term in the model
"growth_func": Optional[func]
Growth function (scalar -> scalar). Changepoint features are created
by applying `growth_func` to "continuous_time_col" with offsets.
If None, uses identity function to use `continuous_time_col` directly
as growth term
If changepoints_dict["method"] == "uniform", this other key is required:
"n_changepoints": int
number of changepoints to evenly space across training period
If changepoints_dict["method"] == "custom", this other key is required:
"dates": Iterable[Union[int, float, str, datetime]]
Changepoint dates. Must be parsable by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset.
:param origin_for_time_vars: Optional[float]
The time origin used to create continuous variables for time
:return: Dict[str, any]
Dictionary with the requested changepoints and associated information
changepoint_df: pd.DataFrame, shape (df.shape[0], len(changepoints))
Changepoint features for modeling the training data
changepoint_values: array-like
List of changepoint values (on same scale as df[continuous_time_col])
Can be used to generate changepoints for prediction.
continuous_time_col: Optional[str]
Name of continuous time column in df
growth_func is applied to this column to generate growth term.
If None, uses "ct1", linear growth
Can be used to generate changepoints for prediction.
growth_func: Optional[callable]
Growth function for defining changepoints (scalar -> scalar).
If None, uses identity function to use continuous_time_col directly
as growth term.
Can be used to generate changepoints for prediction.
changepoint_cols: List[str]
Names of the changepoint columns for modeling
"""
# extracts changepoint values
if changepoints_dict is None:
changepoint_values = None
continuous_time_col = None
growth_func = None
else:
if origin_for_time_vars is None:
origin_for_time_vars = get_default_origin_for_time_vars(df, time_col)
time_features_df = build_time_features_df(
df[time_col],
conti_year_origin=origin_for_time_vars)
changepoint_values = get_changepoint_values_from_config(
changepoints_dict=changepoints_dict,
time_features_df=time_features_df,
time_col="datetime") # datetime column generated by `build_time_features_df`
continuous_time_col = changepoints_dict.get("continuous_time_col")
growth_func = changepoints_dict.get("growth_func")
# extracts changepoint column names
if changepoint_values is None:
changepoint_df = None
changepoint_cols = []
else:
if changepoints_dict is None:
changepoint_dates = None
elif changepoints_dict["method"] == "custom":
changepoint_dates = list(pd.to_datetime(changepoints_dict["dates"]))
elif changepoints_dict["method"] == "uniform":
changepoint_dates = get_evenly_spaced_changepoints_dates(
df=df,
time_col=time_col,
n_changepoints=changepoints_dict["n_changepoints"]
).tolist()[1:] # the changepoint features do not include the growth term
else:
changepoint_dates = None
changepoint_df = get_changepoint_features(
df=time_features_df,
changepoint_values=changepoint_values,
continuous_time_col=continuous_time_col,
growth_func=growth_func,
changepoint_dates=changepoint_dates)
changepoint_cols = list(changepoint_df.columns)
return {
"changepoint_df": changepoint_df,
"changepoint_values": changepoint_values,
"continuous_time_col": continuous_time_col,
"growth_func": growth_func,
"changepoint_cols": changepoint_cols
}
def get_changepoint_dates_from_changepoints_dict(
changepoints_dict,
df=None,
time_col=None):
"""Gets the changepoint dates from ``changepoints_dict``
Parameters
----------
changepoints_dict : `dict` or `None`
The ``changepoints_dict`` which is compatible with
`~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`
df : `pandas.DataFrame` or `None`, default `None`
The data df to put changepoints on.
time_col : `str` or `None`, default `None`
The column name of time column in ``df``.
Returns
-------
changepoint_dates : `list`
List of changepoint dates.
"""
if (changepoints_dict is None
or "method" not in changepoints_dict.keys()
or changepoints_dict["method"] not in ["auto", "uniform", "custom"]):
return None
method = changepoints_dict["method"]
if method == "custom":
# changepoints_dict["dates"] is `Iterable`, converts to list
changepoint_dates = list(changepoints_dict["dates"])
elif method == "uniform":
if df is None or time_col is None:
raise ValueError("When the method of ``changepoints_dict`` is 'uniform', ``df`` and "
"``time_col`` must be provided.")
changepoint_dates = get_evenly_spaced_changepoints_dates(
df=df,
time_col=time_col,
n_changepoints=changepoints_dict["n_changepoints"]
)
# the output is `pandas.Series`, converts to list
changepoint_dates = changepoint_dates.tolist()[1:]
else:
raise ValueError("The method of ``changepoints_dict`` can not be 'auto'. "
"Please specify or detect change points first.")
return changepoint_dates
def add_event_window_multi(
event_df_dict,
time_col,
label_col,
time_delta="1D",
pre_num=1,
post_num=1,
pre_post_num_dict=None):
"""For a given dictionary of events data frames with a time_col and label_col
it adds shifted events prior and after the given events
For example, if the event data frame includes the row '2019-12-25, Christmas',
the function will produce dataframes with the events '2019-12-24, Christmas' and '2019-12-26, Christmas' if
pre_num and post_num are 1 or more.
Parameters
----------
event_df_dict: `dict` [`str`, `pandas.DataFrame`]
A dictionary of events data frames
with each having two columns: ``time_col`` and ``label_col``.
time_col: `str`
The column with the timestamp of the events.
This can be daily but does not have to be.
label_col : `str`
The column with labels for the events.
time_delta : `str`, default "1D"
The amount of the shift for each unit specified by a string
e.g. '1D' stands for one day delta
pre_num : `int`, default 1
The number of events to be added prior to the given event for each event in df.
post_num: `int`, default 1
The number of events to be added after the given event for each event in df.
pre_post_num_dict : `dict` [`str`, (`int`, `int`)] or None, default None
Optionally override ``pre_num`` and ``post_num`` for each key in ``event_df_dict``.
For example, if ``event_df_dict`` has keys "US" and "India", this parameter
can be set to ``pre_post_num_dict = {"US": [1, 3], "India": [1, 2]}``,
denoting that the "US" ``pre_num`` is 1 and ``post_num`` is 3, and "India" ``pre_num`` is 1
and ``post_num`` is 2. Keys not specified by ``pre_post_num_dict`` use the default given by
``pre_num`` and ``post_num``.
Returns
-------
df : `dict` [`str`, `pandas.DataFrame`]
A dictionary of dataframes for each needed shift. For example, if pre_num=2 and post_num=3,
then 2 + 3 = 5 data frames will be stored in the returned dictionary.
"""
if pre_post_num_dict is None:
pre_post_num_dict = {}
shifted_df_dict = {}
for event_df_key, event_df in event_df_dict.items():
if event_df_key in pre_post_num_dict.keys():
pre_num0 = pre_post_num_dict[event_df_key][0]
post_num0 = pre_post_num_dict[event_df_key][1]
else:
pre_num0 = pre_num
post_num0 = post_num
df_dict0 = add_event_window(
df=event_df,
time_col=time_col,
label_col=label_col,
time_delta=time_delta,
pre_num=pre_num0,
post_num=post_num0,
events_name=event_df_key)
shifted_df_dict.update(df_dict0)
return shifted_df_dict
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None):
"""Returns column name corresponding to a particular fourier term, as returned by fourier_series_fcn
:param k: int
fourier term
:param col_name: str
column in the dataframe used to generate fourier series
:param function_name: str
sin or cos
:param seas_name: Optional[str]
appended to new column names added for fourier terms
:return: str
column name in DataFrame returned by fourier_series_fcn
"""
# patsy doesn't allow "." in formula term. Replace "." with "_" rather than quoting "Q()" all fourier terms
name = f"{function_name}{k:.0f}_{col_name}"
if seas_name is not None:
name = f"{name}_{seas_name}"
return name
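# A usage sketch: the generated column name encodes the function, the order, the source
# column and, optionally, the seasonality name.
def _example_fourier_col_name():
    name = get_fourier_col_name(k=2, col_name="toy", function_name="cos", seas_name="yearly")
    # name == "cos2_toy_yearly"
    return name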
def fourier_series_fcn(col_name, period=1.0, order=1, seas_name=None):
"""Generates a function which creates fourier series matrix for a column of an input df
:param col_name: str
is the column name in the dataframe which is to be used for
generating fourier series. It needs to be a continuous variable.
:param period: float
the period of the fourier series
:param order: int
the order of the fourier series
:param seas_name: Optional[str]
appended to new column names added for fourier terms.
Useful to distinguish multiple fourier
series on same col_name with different periods.
:return: callable
a function which can be applied to any data.frame df
with a column name being equal to col_name
"""
def fs_func(df):
out_df = pd.DataFrame()
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` functions and operators implementations in SDC
| Also, it contains Numba internal operators which are required for Series type handling
"""
import numba
import numpy
import operator
import pandas
import math
import sys
from numba.errors import TypingError
from numba.extending import overload, overload_method, overload_attribute
from numba.typing import signature
from numba.extending import intrinsic
from numba import (types, numpy_support, cgutils)
from numba.typed import Dict
from numba import prange
import sdc
import sdc.datatypes.common_functions as common_functions
from sdc.datatypes.common_functions import (TypeChecker, check_index_is_numeric, find_common_dtype_from_numpy_dtypes,
sdc_join_series_indexes)
from sdc.datatypes.hpat_pandas_series_rolling_types import _hpat_pandas_series_rolling_init
from sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType
from sdc.datatypes.hpat_pandas_getitem_types import SeriesGetitemAccessorType
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (StringArrayType, string_array_type, str_arr_is_na, str_arr_set_na,
num_total_chars, pre_alloc_string_array, cp_str_list_to_array)
from sdc.utils import to_array, sdc_overload, sdc_overload_method, sdc_overload_attribute
from sdc.datatypes import hpat_pandas_series_autogenerated
@sdc_overload(operator.getitem)
def hpat_pandas_series_accessor_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesGetitemAccessorType):
return None
accessor = self.accessor.literal_value
if accessor == 'iloc':
if isinstance(idx, (types.List, types.Array, types.SliceType)):
def hpat_pandas_series_iloc_list_slice_impl(self, idx):
result_data = self._series._data[idx]
result_index = self._series.index[idx]
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_iloc_list_slice_impl
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iloc_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iloc_impl
def hpat_pandas_series_iloc_callable_impl(self, idx):
index = numpy.asarray(list(map(idx, self._series._data)))
return pandas.Series(self._series._data[index], self._series.index[index], self._series._name)
return hpat_pandas_series_iloc_callable_impl
raise TypingError('{} The index must be an Integer, Slice or List of Integer or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'iat':
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iat_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iat_impl
raise TypingError('{} The index must be an Integer. Given: {}'.format(_func_name, idx))
if accessor == 'loc':
# Note: Loc returns a Series
# Note: Index 0 in slice not supported
# Note: Loc slice and callable with String not implemented
index_is_none = (self.series.index is None or
isinstance(self.series.index, numba.types.misc.NoneType))
if isinstance(idx, types.SliceType) and index_is_none:
def hpat_pandas_series_loc_slice_noidx_impl(self, idx):
max_slice = sys.maxsize
start = idx.start
stop = idx.stop
if idx.stop == max_slice:
stop = max_slice - 1
result_data = self._series._data[start:stop+1]
result_index = numpy.arange(start, stop + 1)
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_loc_slice_noidx_impl
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_loc_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._series._data[mask], index[mask], self._series._name)
return hpat_pandas_series_loc_impl
raise TypingError('{} The index must be a Number, Slice, String, List, Array or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'at':
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_at_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return self._series._data[mask]
return hpat_pandas_series_at_impl
raise TypingError('{} The index must be a Number or String. Given: {}'.format(_func_name, idx))
raise TypingError('{} Unknown accessor. Only "loc", "iloc", "at", "iat" are supported.\
Given: {}'.format(_func_name, accessor))
@sdc_overload(operator.getitem)
def hpat_pandas_series_getitem(self, idx):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.get
Limitations
-----------
Supported ``key`` can be one of the following:
- Integer scalar, e.g. :obj:`series[0]`
- A slice, e.g. :obj:`series[2:5]`
- Another series
Examples
--------
.. literalinclude:: ../../../examples/series_getitem.py
:language: python
:lines: 27-
:caption: Getting Pandas Series elements
:name: ex_series_getitem
.. command-output:: python ./series_getitem.py
:cwd: ../../../examples
.. todo:: Fix SDC behavior and add the expected output of the > python ./series_getitem.py to the docstring
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesType):
return None
# Note: Getitem returns a Series
index_is_none = isinstance(self.index, numba.types.misc.NoneType)
index_is_none_or_numeric = index_is_none or (self.index and isinstance(self.index.dtype, types.Number))
index_is_string = not index_is_none and isinstance(self.index.dtype, (types.UnicodeType, types.StringLiteral))
if (
isinstance(idx, types.Number) and index_is_none_or_numeric or
(isinstance(idx, (types.UnicodeType, types.StringLiteral)) and index_is_string)
):
def hpat_pandas_series_getitem_index_impl(self, idx):
index = self.index
mask = numpy.empty(len(self._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._data[mask], index[mask], self._name)
return hpat_pandas_series_getitem_index_impl
if (isinstance(idx, types.Integer) and index_is_string):
def hpat_pandas_series_idx_impl(self, idx):
return self._data[idx]
return hpat_pandas_series_idx_impl
if isinstance(idx, types.SliceType):
# Returning a slice for str values is not implemented
def hpat_pandas_series_getitem_idx_slice_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_slice_impl
if (
isinstance(idx, (types.List, types.Array)) and
isinstance(idx.dtype, (types.Boolean, bool))
):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (index_is_none and isinstance(idx, SeriesType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
index = numpy.arange(len(self._data))
if (index != idx.index).sum() == 0:
return pandas.Series(self._data[idx._data], index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
res = numpy.copy(self._data[:len(idx._data)])
index = numpy.arange(len(self._data))
for i in numba.prange(len(res)):
for j in numba.prange(len(index)):
if j == idx._data[i]:
res[i] = self._data[j]
return pandas.Series(res, index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (isinstance(idx, SeriesType) and not isinstance(self.index, types.NoneType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
# Series with str index is not implemented
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
if (self._index != idx._index).sum() == 0:
return pandas.Series(self._data[idx._data], self._index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_series_impl
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
index = self.index
data = self._data
size = len(index)
data_res = []
index_res = []
for value in idx._data:
mask = numpy.zeros(shape=size, dtype=numpy.bool_)
for i in numba.prange(size):
mask[i] = index[i] == value
data_res.extend(data[mask])
index_res.extend(index[mask])
return pandas.Series(data=data_res, index=index_res, name=self._name)
return hpat_pandas_series_getitem_idx_series_impl
raise TypingError('{} The index must be a Number, Slice, String, Boolean Array or a Series.\
Given: {}'.format(_func_name, idx))
@sdc_overload(operator.setitem)
def hpat_pandas_series_setitem(self, idx, value):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.__setitem__
Examples
--------
.. literalinclude:: ../../../examples/series_setitem_int.py
:language: python
:lines: 27-
:caption: Setting Pandas Series elements
:name: ex_series_setitem
.. code-block:: console
> python ./series_setitem_int.py
0 0
1 4
2 3
3 2
4 1
dtype: int64
> python ./series_setitem_slice.py
0 5
1 4
2 0
3 0
4 0
dtype: int64
> python ./series_setitem_series.py
0 5
1 0
2 3
3 0
4 1
dtype: int64
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.set` implementation
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_setitem*
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
value: :obj:`object`
input value
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
ty_checker = TypeChecker('Operator setitem.')
ty_checker.check(self, SeriesType)
if not (isinstance(idx, (types.Integer, types.SliceType, SeriesType))):
ty_checker.raise_exc(idx, 'int, Slice, Series', 'idx')
if not((isinstance(value, SeriesType) and isinstance(value.dtype, self.dtype)) or \
isinstance(value, type(self.dtype))):
ty_checker.raise_exc(value, self.dtype, 'value')
if isinstance(idx, types.Integer) or isinstance(idx, types.SliceType):
def hpat_pandas_series_setitem_idx_integer_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_value
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_slice
"""
self._data[idx] = value
return self
return hpat_pandas_series_setitem_idx_integer_impl
if isinstance(idx, SeriesType):
def hpat_pandas_series_setitem_idx_series_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_series
"""
super_index = idx._data
self._data[super_index] = value
return self
return hpat_pandas_series_setitem_idx_series_impl
@sdc_overload_attribute(SeriesType, 'iloc')
def hpat_pandas_series_iloc(self):
"""
Pandas Series method :meth:`pandas.Series.iloc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iloc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iloc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iloc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iloc')
return hpat_pandas_series_iloc_impl
@sdc_overload_attribute(SeriesType, 'loc')
def hpat_pandas_series_loc(self):
"""
Pandas Series method :meth:`pandas.Series.loc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_loc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute loc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_loc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'loc')
return hpat_pandas_series_loc_impl
@sdc_overload_attribute(SeriesType, 'iat')
def hpat_pandas_series_iat(self):
"""
Pandas Series method :meth:`pandas.Series.iat` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iat*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iat().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iat_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iat')
return hpat_pandas_series_iat_impl
@sdc_overload_attribute(SeriesType, 'at')
def hpat_pandas_series_at(self):
"""
Pandas Series method :meth:`pandas.Series.at` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_at*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute at().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_at_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'at')
return hpat_pandas_series_at_impl
@sdc_overload_method(SeriesType, 'nsmallest')
def hpat_pandas_series_nsmallest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nsmallest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nsmallest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nsmallest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nsmallest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nsmallest(). Unsupported parameter. Given 'keep' != 'first'")
# mergesort is used for stable sorting of repeated values
indices = self._data.argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nsmallest_impl
@sdc_overload_method(SeriesType, 'nlargest')
def hpat_pandas_series_nlargest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nlargest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nlargest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many descending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nlargest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nlargest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nlargest(). Unsupported parameter. Given 'keep' != 'first'")
# data: [0, 1, -1, 1, 0] -> [1, 1, 0, 0, -1]
# index: [0, 1, 2, 3, 4] -> [1, 3, 0, 4, 2] (not [3, 1, 4, 0, 2])
# subtract 1 to ensure reverse ordering at boundaries
indices = (-self._data - 1).argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nlargest_impl
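# An illustrative sketch (plain numpy, not jit-compiled) of the negate-and-shift argsort used
# above, on the example from the comment: ties keep their left-to-right order (stable mergesort)
# while values are ranked in descending order.
def _example_nlargest_ordering():
    data = numpy.array([0, 1, -1, 1, 0])
    indices = (-data - 1).argsort(kind='mergesort')[:3]
    # indices == array([1, 3, 0]): the two 1s come first (positions 1, 3), then the first 0
    return indices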
@sdc_overload_attribute(SeriesType, 'shape')
def hpat_pandas_series_shape(self):
"""
Pandas Series attribute :attr:`pandas.Series.shape` implementation
**Algorithm**: result = series.shape
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shape1
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:obj:`tuple`
a tuple of the shape of the underlying data
"""
_func_name = 'Attribute shape.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl
@sdc_overload_method(SeriesType, 'std')
def hpat_pandas_series_std(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.std` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method std().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_std_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
var = self.var(axis=axis, skipna=skipna, level=level, ddof=ddof, numeric_only=numeric_only)
return var ** 0.5
return hpat_pandas_series_std_impl
@sdc_overload_attribute(SeriesType, 'values')
def hpat_pandas_series_values(self):
"""
Pandas Series attribute 'values' implementation.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.values.html#pandas.Series.values
Algorithm: result = series.values
Where:
series: pandas.series
result: pandas.series as ndarray
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_values
"""
_func_name = 'Attribute values.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_values_impl(self):
return self._data
return hpat_pandas_series_values_impl
@sdc_overload_method(SeriesType, 'value_counts')
def hpat_pandas_series_value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.value_counts
Examples
--------
.. literalinclude:: ../../../examples/series/series_value_counts.py
:language: python
:lines: 27-
:caption: Getting the number of values excluding NaNs
:name: ex_series_value_counts
.. command-output:: python ./series/series_value_counts.py
:cwd: ../../../examples
.. note::
Parameters bins and dropna for Strings are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.count <pandas.Series.count>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.value_counts` implementation.
Note: Elements with the same count might appear in the result in a different order than in Pandas
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_value_counts*
Parameters
-----------
self: :obj:`pandas.Series`
input series
normalize: :obj:`boolean`, default False
If True then the object returned will contain the relative frequencies of the unique values
sort: :obj:`boolean`, default True
Sort by frequencies
ascending: :obj:`boolean`, default False
Sort in ascending order
bins: :obj:`integer`, default None
*unsupported*
dropna: :obj:`boolean`, default True
Skip counts of NaN
Returns
-------
:returns :obj:`pandas.Series`
"""
_func_name = 'Method value_counts().'
ty_checker = TypeChecker('Method value_counts().')
ty_checker.check(self, SeriesType)
if not isinstance(normalize, (types.Omitted, types.Boolean, bool)) and normalize is True:
ty_checker.raise_exc(normalize, 'boolean', 'normalize')
if not isinstance(sort, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(sort, 'boolean', 'sort')
if not isinstance(ascending, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(ascending, 'boolean', 'ascending')
if not isinstance(bins, (types.Omitted, types.NoneType)) and bins is not None:
ty_checker.raise_exc(bins, 'boolean', 'bins')
if not isinstance(dropna, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(dropna, 'boolean', 'dropna')
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_value_counts_str_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=types.unicode_type,
value_type=types.intp
)
nan_counts = 0
for i, value in enumerate(self._data):
if str_arr_is_na(self._data, i):
if not dropna:
nan_counts += 1
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
need_add_nan_count = not dropna and nan_counts
values = [key for key in value_counts_dict]
counts_as_list = [value_counts_dict[key] for key in value_counts_dict.keys()]
values_len = len(values)
if need_add_nan_count:
# append a separate empty string for NaN elements
values_len += 1
values.append('')
counts_as_list.append(nan_counts)
counts = numpy.asarray(counts_as_list, dtype=numpy.intp)
indexes_order = numpy.arange(values_len)
if sort:
indexes_order = counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
counts_sorted = numpy.take(counts, indexes_order)
values_sorted_by_count = [values[i] for i in indexes_order]
# allocate the result index as a StringArray and copy values to it
index_string_lengths = numpy.asarray([len(s) for s in values_sorted_by_count])
index_total_chars = numpy.sum(index_string_lengths)
result_index = pre_alloc_string_array(len(values_sorted_by_count), index_total_chars)
cp_str_list_to_array(result_index, values_sorted_by_count)
if need_add_nan_count:
# set null bit for StringArray element corresponding to NaN element (was added as last in values)
index_previous_nan_pos = values_len - 1
for i in numpy.arange(values_len):
if indexes_order[i] == index_previous_nan_pos:
str_arr_set_na(result_index, i)
break
return pandas.Series(counts_sorted, index=result_index, name=self._name)
return hpat_pandas_series_value_counts_str_impl
elif isinstance(self.dtype, types.Number):
series_dtype = self.dtype
def hpat_pandas_series_value_counts_number_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=series_dtype,
value_type=types.intp
)
zero_counts = 0
is_zero_found = False
for value in self._data:
if (dropna and numpy.isnan(value)):
continue
# Pandas hash-based value_count_float64 function doesn't distinguish between
# positive and negative zeros, hence we count zero values separately and store
# as a key the first zero value found in the Series
if not value:
zero_counts += 1
if not is_zero_found:
zero_value = value
is_zero_found = True
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
if zero_counts:
value_counts_dict[zero_value] = zero_counts
unique_values = numpy.asarray(
list(value_counts_dict),
dtype=self._data.dtype
)
value_counts = numpy.asarray(
[value_counts_dict[key] for key in value_counts_dict],
dtype=numpy.intp
)
indexes_order = numpy.arange(len(value_counts))
if sort:
indexes_order = value_counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
sorted_unique_values = numpy.take(unique_values, indexes_order)
sorted_value_counts = numpy.take(value_counts, indexes_order)
return pandas.Series(sorted_value_counts, index=sorted_unique_values, name=self._name)
return hpat_pandas_series_value_counts_number_impl
return None
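# For reference, the semantics the overload above reproduces in compiled code,
# shown with plain (interpreted) pandas. This is an illustration only, not the
# SDC code path:
def _example_value_counts_semantics():
    import pandas as pd

    s = pd.Series(['a', 'b', 'a', None, 'b', 'a'])
    # dropna=True (the default): NaN is excluded from the counts
    counts = s.value_counts(sort=True, ascending=False, dropna=True)
    # dropna=False: NaN gets its own row, which the string impl above emulates
    # by appending an empty string and setting its null bit in the result index
    counts_with_nan = s.value_counts(dropna=False)
    return counts, counts_with_nan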
@sdc_overload_method(SeriesType, 'var')
def hpat_pandas_series_var(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.var` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method var().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_var_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
if skipna is None:
skipna = True
if skipna:
valuable_length = len(self._data) - numpy.sum(numpy.isnan(self._data))
if valuable_length <= ddof:
return numpy.nan
return numpy.nanvar(self._data) * valuable_length / (valuable_length - ddof)
if len(self._data) <= ddof:
return numpy.nan
return self._data.var() * len(self._data) / (len(self._data) - ddof)
return hpat_pandas_series_var_impl
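# The impl above rescales the ddof=0 estimate: nanvar(x) * n / (n - ddof).
# A small sanity sketch (plain numpy/pandas, not the jitted path) showing that
# this matches pandas.Series.var(ddof=1):
def _example_var_ddof_rescaling():
    import numpy as np
    import pandas as pd

    data = np.array([1.0, 2.0, np.nan, 4.0, 7.0])
    n = len(data) - np.sum(np.isnan(data))     # number of non-NaN values
    rescaled = np.nanvar(data) * n / (n - 1)   # ddof=0 variance rescaled to ddof=1
    assert np.isclose(rescaled, pd.Series(data).var(ddof=1))
    return rescaled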
@sdc_overload_attribute(SeriesType, 'index')
def hpat_pandas_series_index(self):
"""
Pandas Series attribute :attr:`pandas.Series.index` implementation
**Algorithm**: result = series.index
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index1
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index2
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
the index of the Series
"""
_func_name = 'Attribute index.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_index_none_impl(self):
return numpy.arange(len(self._data))
return hpat_pandas_series_index_none_impl
else:
def hpat_pandas_series_index_impl(self):
return self._index
return hpat_pandas_series_index_impl
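# When the Series has no explicit index, the attribute overload above returns
# positional labels (numpy.arange). The interpreted-pandas analogue, purely for
# illustration:
def _example_index_semantics():
    import numpy as np
    import pandas as pd

    s = pd.Series([10, 20, 30])          # no explicit index given
    positional = np.arange(len(s))       # what hpat_pandas_series_index_none_impl returns
    assert (np.asarray(s.index) == positional).all()
    return positional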
@sdc_overload_method(SeriesType, 'rolling')
def hpat_pandas_series_rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.rolling
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_min.py
:language: python
:lines: 27-
:caption: Calculate the rolling minimum.
:name: ex_series_rolling
.. command-output:: python ./series/rolling/series_rolling_min.py
:cwd: ../../../examples
.. todo:: Add support of parameters ``center``, ``win_type``, ``on``, ``axis`` and ``closed``
.. seealso::
:ref:`expanding <pandas.Series.expanding>`
Provides expanding transformations.
:ref:`ewm <pandas.Series.ewm>`
Provides exponential weighted functions.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series attribute :attr:`pandas.Series.rolling` implementation
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling
Parameters
----------
series: :obj:`pandas.Series`
Input Series.
window: :obj:`int` or :obj:`offset`
Size of the moving window.
min_periods: :obj:`int`
Minimum number of observations in window required to have a value.
center: :obj:`bool`
Set the labels at the center of the window.
*unsupported*
win_type: :obj:`str`
Provide a window type.
*unsupported*
on: :obj:`str`
Column on which to calculate the rolling window.
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
closed: :obj:`str`
Make the interval closed on the ‘right’, ‘left’, ‘both’ or ‘neither’ endpoints.
*unsupported*
Returns
-------
:class:`pandas.Series.rolling`
Output class to manipulate with input data.
"""
ty_checker = TypeChecker('Method rolling().')
ty_checker.check(self, SeriesType)
if not isinstance(window, types.Integer):
ty_checker.raise_exc(window, 'int', 'window')
minp_accepted = (types.Omitted, types.NoneType, types.Integer)
if not isinstance(min_periods, minp_accepted) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'None, int', 'min_periods')
center_accepted = (types.Omitted, types.Boolean)
if not isinstance(center, center_accepted) and center is not False:
ty_checker.raise_exc(center, 'bool', 'center')
str_types = (types.Omitted, types.NoneType, types.StringLiteral, types.UnicodeType)
if not isinstance(win_type, str_types) and win_type is not None:
ty_checker.raise_exc(win_type, 'str', 'win_type')
if not isinstance(on, str_types) and on is not None:
ty_checker.raise_exc(on, 'str', 'on')
axis_accepted = (types.Omitted, types.Integer, types.StringLiteral, types.UnicodeType)
if not isinstance(axis, axis_accepted) and axis != 0:
ty_checker.raise_exc(axis, 'int, str', 'axis')
if not isinstance(closed, str_types) and closed is not None:
ty_checker.raise_exc(closed, 'str', 'closed')
nan_minp = isinstance(min_periods, (types.Omitted, types.NoneType)) or min_periods is None
def hpat_pandas_series_rolling_impl(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
if window < 0:
raise ValueError('window must be non-negative')
if nan_minp == True: # noqa
minp = window
else:
minp = min_periods
if minp < 0:
raise ValueError('min_periods must be >= 0')
if minp > window:
raise ValueError('min_periods must be <= window')
if center != False: # noqa
raise ValueError('Method rolling(). The object center\n expected: False')
if win_type is not None:
raise ValueError('Method rolling(). The object win_type\n expected: None')
if on is not None:
raise ValueError('Method rolling(). The object on\n expected: None')
if axis != 0:
raise ValueError('Method rolling(). The object axis\n expected: 0')
if closed is not None:
raise ValueError('Method rolling(). The object closed\n expected: None')
return _hpat_pandas_series_rolling_init(self, window, minp, center,
win_type, on, axis, closed)
return hpat_pandas_series_rolling_impl
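# For reference, the rolling semantics forwarded by the overload above, shown
# with plain pandas (illustration only). When min_periods is None the impl above
# uses the window size, matching pandas' default for integer windows:
def _example_rolling_semantics():
    import pandas as pd

    s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    # the first two positions have no complete window of 3, so they yield NaN
    return s.rolling(window=3, min_periods=3).min()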
@sdc_overload_attribute(SeriesType, 'size')
def hpat_pandas_series_size(self):
"""
Pandas Series attribute :attr:`pandas.Series.size` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_size
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
Return the number of elements in the underlying data.
"""
_func_name = 'Attribute size.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_size_impl(self):
return len(self._data)
return hpat_pandas_series_size_impl
@sdc_overload_attribute(SeriesType, 'str')
def hpat_pandas_series_str(self):
"""
Pandas Series attribute :attr:`pandas.Series.str` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_get
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.core.strings.StringMethods`
Output class to manipulate with input data.
"""
_func_name = 'Attribute str.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.List, types.UnicodeType)):
msg = '{} Can only use .str accessor with string values. Given: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
def hpat_pandas_series_str_impl(self):
return | pandas.core.strings.StringMethods(self) | pandas.core.strings.StringMethods |
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""demand io layer. mainly data loading and validation functions.
Function and variable from this module are available in Demand.yml
for validation.
Few function are also used by processing layer.
Important note: Some functions are just imported and not used
are for yaml validation. All definations(including those which are
imported from other modules) from this module are available in
yaml validation.
"""
import csv
import functools
import itertools
import os
import logging
from rumi.io import functionstore as fs
from rumi.io import loaders
from rumi.io import filemanager
from rumi.io import config
from rumi.io import constant
from rumi.io import common
from rumi.io import utilities
import pandas as pd
from rumi.io.common import balancing_area, balancing_time
from rumi.io.utilities import check_consumer_validity
from rumi.io.utilities import check_geographic_validity
from rumi.io.utilities import check_time_validity
from rumi.io.multiprocessutils import execute_in_process_pool
logger = logging.getLogger(__name__)
def get_consumer_levels(ds):
"""get number of consumer levels defined
for given demand sector
Parameters
----------
ds: str
Demand sector name
Returns
-------
1 or 2
"""
DS_Cons1_Map = loaders.get_parameter("DS_Cons1_Map")
type1 = DS_Cons1_Map[ds][-1]
Cons1_Cons2_Map = loaders.get_parameter("Cons1_Cons2_Map")
if Cons1_Cons2_Map and Cons1_Cons2_Map.get(type1, None):
return 2
return 1
def get_cons_columns(ds):
"""get maximum consumer columns for given demand sector
Parameters
-----------
ds: str
Demand sector name
Returns
-------
a list of consumer columns for given demand sector
"""
return list(constant.CONSUMER_TYPES[:get_consumer_levels(ds)])
def get_consumer_granularity(ds, specified_gran):
"""Converts CONSUMERALL to actual granularity
Parameters
-----------
ds: str
Demand sector name
specified_gran: str
Specified consumer granularity (possibly CONSUMERALL)
Returns
-------
one of CONSUMERTYPE1,CONSUMERTYPE2
"""
if specified_gran != "CONSUMERALL":
return specified_gran
if get_consumer_levels(ds) == 1:
return "CONSUMERTYPE1"
else:
return "CONSUMERTYPE2"
def get_geographic_granularity(demand_sector,
energy_service,
energy_carrier):
DS_ES_EC_DemandGranularity_Map = loaders.get_parameter(
"DS_ES_EC_DemandGranularity_Map")
granularity_map = DS_ES_EC_DemandGranularity_Map.set_index(['DemandSector',
'EnergyService',
'EnergyCarrier'])
return granularity_map.loc[(demand_sector,
energy_service,
energy_carrier)]['GeographicGranularity']
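# Sketch of the lookup pattern used by get_geographic_granularity() above, on a
# hypothetical hand-made granularity table (all values below are made up for
# illustration; the real table comes from DS_ES_EC_DemandGranularity_Map):
def _example_granularity_lookup():
    import pandas as pd

    gran = pd.DataFrame({
        "DemandSector": ["D_RES"],
        "EnergyService": ["LIGHTING"],
        "EnergyCarrier": ["ELECTRICITY"],
        "GeographicGranularity": ["MODELGEOGRAPHY"],
    })
    lookup = gran.set_index(["DemandSector", "EnergyService", "EnergyCarrier"])
    return lookup.loc[("D_RES", "LIGHTING", "ELECTRICITY")]["GeographicGranularity"]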
def get_type(demand_sector, energy_service):
"""find type of service BOTTOMUP,EXTRANEOUS,GDPELASTICITY or RESIDUAL
"""
DS_ES_Map = loaders.get_parameter('DS_ES_Map')
DS_ES_Map = DS_ES_Map.set_index(['DemandSector', 'EnergyService'])
return DS_ES_Map.loc[(demand_sector, energy_service)]['InputType']
def get_BaseYearDemand(demand_sector):
"""loader function for parameter BaseYearDemand
"""
return get_demand_sector_parameter('BaseYearDemand',
demand_sector)
def get_DemandElasticity(demand_sector):
"""loader function for parameter DemandElasticity
"""
return get_demand_sector_parameter('DemandElasticity',
demand_sector)
def get_ExtraneousDemand(demand_sector):
"""loader function for parameter ExtraneousDemand
"""
extraneous = get_demand_sector_parameter('ExtraneousDemand',
demand_sector)
return extraneous
def get_ST_Efficiency(demand_sector):
"""ST_Efficiency loader function
"""
return get_demand_sector_parameter("ST_Efficiency",
demand_sector)
def get_ST_EmissionDetails(demand_sector):
"""ST_EmissionDetails loader function
"""
return get_demand_sector_parameter("ST_EmissionDetails",
demand_sector)
def get_ResidualDemand(demand_sector):
"""loader function for parameter ResidualDemand
"""
return get_demand_sector_parameter("ResidualDemand",
demand_sector)
def get_NumConsumers(demand_sector):
"""loader function for parameter NumConsumers
"""
return get_demand_sector_parameter('NumConsumers',
demand_sector)
def get_NumInstances(demand_sector, energy_service):
"""loader function for parameter NumInstances
"""
return get_DS_ES_parameter('NumInstances',
demand_sector,
energy_service)
def get_EfficiencyLevelSplit(demand_sector, energy_service):
"""loader function for parameter EfficiencyLevelSplit
"""
return get_DS_ES_parameter('EfficiencyLevelSplit',
demand_sector,
energy_service)
def get_ES_Demand(demand_sector,
energy_service,
service_tech):
"""loader function for parameter ES_Demand
should not be used directly. use loaders.get_parameter instead.
"""
prefix = f"{service_tech}_"
filepath = find_custom_DS_ES_filepath(demand_sector,
energy_service,
'ES_Demand',
prefix)
logger.debug(f"Reading {prefix}ES_Demand from file {filepath}")
return pd.read_csv(filepath)
def get_Penetration(demand_sector,
energy_service,
ST_combination):
"""loader function for parameter Penetration
"""
for item in itertools.permutations(ST_combination):
prefix = constant.ST_SEPARATOR_CHAR.join(
item) + constant.ST_SEPARATOR_CHAR
filepath = find_custom_DS_ES_filepath(demand_sector,
energy_service,
'Penetration',
prefix)
logger.debug(f"Searching for file {filepath}")
if os.path.exists(filepath):
logger.debug(f"Reading {prefix} from file {filepath}")
return | pd.read_csv(filepath) | pandas.read_csv |
from dataset_loader import *
from utils import *
from model import BeautyModel
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style as style
from PIL import Image
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
IMG_SIZE=224
CHANNELS=3
class Inferencing:
def __init__(self, model_path, device_):
with tf.device(device_):
self.device = device_
# self.model = BeautyModel().create_model()
self.model = tf.keras.models.load_model(model_path, compile=False)
# self.model.summary()
def create_confusion_matrix(self, model):
dataloader = BeautyDataLoader()
label_names = dataloader.get_label_names()
y_val_bin = dataloader.dataset_split['valid_y']
dataset = dataloader.create_dataset(fold=0)
val_dataset = dataset['valid']
target = y_val_bin[0]
df = perf_grid(self.device, val_dataset, target, label_names, model)
# Get the maximum F1-score for each label when using the second model and varying the threshold
print(df.head(10))
# return df, label_names
def get_predictions(self, filenames, labels, model):
from keras.preprocessing import image
org_dataset = | pd.read_csv('Dataset/beauty_dataset.csv') | pandas.read_csv |
# EcoFOCI
"""Contains a collection of wetlabs equipment parsing.
(A seabird product now)
These include:
Moored Eco and Wetstars:
* 1 channel -> 3 channel systems
Non-moored:
* processing is likely the same if recording internally.
"""
import sys
import pandas as pd
class wetlabs(object):
r""" Wetlabs Unified parser
EcoFLS(B) - single channel fluorometer (B-battery pack)
EcoFLNT(US) - dual channel fluorometer and turbidity
Triplet - three channels
Eco's have an array of channels to choose from... files are all the same,
you must provide the right cal coefs for the data
"""
def __init__(self):
"""data is a pandas dataframe
Wich is immediatly converted to xarray
"""
pass
def parse(self, filename=None, return_header=True, datetime_index=True):
r"""
Basic Method to open and read fls(b) cnv files
Wetlab format is Date Time Channel_Identifier Count TempCount
"""
header = []
with open(filename) as fobj:
for k, line in enumerate(fobj.readlines()):
header = header + [line]
if "$get" in line:
headercount=k+2
break
rawdata_df = pd.read_csv(filename,
delimiter="\s+",
parse_dates=True,
header=None,
skiprows=headercount)
if len(rawdata_df.columns) == 5: #single channel
rawdata_df = rawdata_df.rename(columns={0: 'date',1:'time',3:str(rawdata_df[2][0]),4:'TempCount'})
rawdata_df = rawdata_df.drop([2],axis=1)
elif len(rawdata_df.columns) == 7: #two channel
rawdata_df = rawdata_df.rename(columns={0: 'date',
1:'time',
3:str(rawdata_df[2][0]),
5:str(rawdata_df[4][0]),
6:'TempCount'})
rawdata_df = rawdata_df.drop([2,4],axis=1)
elif len(rawdata_df.columns) == 9: #three channel
rawdata_df = rawdata_df.rename(columns={0: 'date',
1:'time',
3:str(rawdata_df[2][0]),
5:str(rawdata_df[4][0]),
7:str(rawdata_df[6][0]),
8:'TempCount'})
rawdata_df = rawdata_df.drop([2,4,6],axis=1)
else:
print(f'number of channels unknown: {len(rawdata_df.columns)}')
sys.exit()
rawdata_df["date_time"] = pd.to_datetime(
rawdata_df['date'] + " " + rawdata_df['time'], format="%m/%d/%y %H:%M:%S"
)
if datetime_index:
rawdata_df = rawdata_df.set_index( | pd.DatetimeIndex(rawdata_df['date_time']) | pandas.DatetimeIndex |
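# Example usage of the wetlabs parser above. The file name is hypothetical; any
# internally recorded Eco/Wetstar ASCII file whose header ends with a "$get"
# line should work:
if __name__ == "__main__":
    ecofluor = wetlabs()
    parsed = ecofluor.parse(filename="example_ecoflsb.raw",
                            return_header=True,
                            datetime_index=True)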
from PyDSS.pyContrReader import pySubscriptionReader as pySR
from PyDSS.pyLogger import getLoggerTag
from PyDSS.unitDefinations import type_info as Types
from PyDSS.unitDefinations import unit_info as Units
from PyDSS.pyContrReader import pyExportReader as pyER
from PyDSS import unitDefinations
from PyDSS.exceptions import InvalidParameter
from PyDSS.utils.dataframe_utils import write_dataframe
import pandas as pd
import numpy as np
#import helics as h
import pathlib
import gzip
import logging
import shutil
import math
import os
class ResultContainer:
def __init__(self, Options, SystemPaths, dssObjects, dssObjectsByClass, dssBuses, dssSolver, dssCommand):
if Options["Logging"]["Pre-configured logging"]:
LoggerTag = __name__
else:
LoggerTag = getLoggerTag(Options)
self.metadata_info = unitDefinations.unit_info
self.__dssDolver = dssSolver
self.Results = {}
self.CurrentResults = {}
self.pyLogger = logging.getLogger(LoggerTag)
self.Buses = dssBuses
self.ObjectsByElement = dssObjects
self.ObjectsByClass = dssObjectsByClass
self.SystemPaths = SystemPaths
self.__dssCommand = dssCommand
self.__Settings = Options
self.__StartDay = Options['Project']['Start Day']
self.__EndDay = Options['Project']['End Day']
self.__DateTime = []
self.__Frequency = []
self.__SimulationMode = []
self.__ExportFormat = Options['Exports']['Export Format']
self.__ExportCompression = Options['Exports']['Export Compression']
self.__publications = {}
self.__subscriptions = {}
self.ExportFolder = os.path.join(self.SystemPaths['Export'], Options['Project']['Active Scenario'])
pathlib.Path(self.ExportFolder).mkdir(parents=True, exist_ok=True)
if self.__Settings['Exports']['Export Mode'] == 'byElement':
self.FileReader = pyER(os.path.join(SystemPaths['ExportLists'], 'ExportMode-byElement.toml'))
self.ExportList = self.FileReader.pyControllers
self.PublicationList = self.FileReader.publicationList
self.CreateListByElement()
elif self.__Settings['Exports']['Export Mode'] == 'byClass':
self.FileReader = pyER(os.path.join(SystemPaths['ExportLists'], 'ExportMode-byClass.toml'))
self.ExportList = self.FileReader.pyControllers
self.PublicationList = self.FileReader.publicationList
self.CreateListByClass()
if self.__Settings['Helics']['Co-simulation Mode']:
self.__createPyDSSfederate()
self.__registerFederatePublications()
self.__registerFederateSubscriptions()
h.helicsFederateEnterExecutingMode(self.__PyDSSfederate)
self.pyLogger.debug('Entered HELICS execution mode')
return
def __createPyDSSfederate(self):
fedinfo = h.helicsCreateFederateInfo()
h.helicsFederateInfoSetCoreName(fedinfo, self.__Settings['Helics']['Federate name'])
h.helicsFederateInfoSetCoreTypeFromString(fedinfo, self.__Settings['Helics']['Core type'])
h.helicsFederateInfoSetCoreInitString(fedinfo, "--federates=1")
h.helicsFederateInfoSetTimeProperty(fedinfo, h.helics_property_time_delta, self.__Settings['Helics']['Time delta'])
h.helicsFederateInfoSetIntegerProperty(fedinfo, h.helics_property_int_log_level,
self.__Settings['Helics']['Helics logging level'])
h.helicsFederateInfoSetFlagOption(fedinfo, h.helics_flag_uninterruptible, True)
self.__PyDSSfederate = h.helicsCreateValueFederate(self.__Settings['Helics']['Federate name'], fedinfo)
return
def __registerFederateSubscriptions(self):
self.FileReader = pySR(os.path.join(self.SystemPaths['ExportLists'], 'Helics-Subcriptions.xlsx'))
self.__subscriptions = self.FileReader.SubscriptionDict
for element, subscription in self.__subscriptions.items():
assert element in self.ObjectsByElement, '"{}" listed in the subscription file not '.format(element) +\
"available in PyDSS's master object dictionary."
if subscription["Subscribe"] == True:
sub = h.helicsFederateRegisterSubscription(self.__PyDSSfederate, subscription["Subscription ID"],
subscription["Unit"])
self.pyLogger.debug('PyDSS subscribing to "{}" with units "{}"'.format(
subscription["Subscription ID"],
subscription["Unit"])
)
subscription['Subscription'] = sub
self.__subscriptions[element] = subscription
return
def updateSubscriptions(self):
for element, subscriptionData in self.__subscriptions.items():
if 'Subscription' in subscriptionData:
if subscriptionData['Data type'].lower() == 'double':
value = h.helicsInputGetDouble(subscriptionData['Subscription'])
elif subscriptionData['Data type'].lower() == 'vector':
value = h.helicsInputGetVector(subscriptionData['Subscription'])
elif subscriptionData['Data type'].lower() == 'string':
value = h.helicsInputGetString(subscriptionData['Subscription'])
elif subscriptionData['Data type'].lower() == 'boolean':
value = h.helicsInputGetBoolean(subscriptionData['Subscription'])
elif subscriptionData['Data type'].lower() == 'integer':
value = h.helicsInputGetInteger(subscriptionData['Subscription'])
dssElement = self.ObjectsByElement[element]
dssElement.SetParameter(subscriptionData['Property'], value)
self.pyLogger.debug('Value for "{}.{}" changed to "{}"'.format(
element,
subscriptionData['Property'],
value
))
return
def __registerFederatePublications(self):
self.__publications = {}
for object, property_dict in self.CurrentResults.items():
objClass = None
for Class in self.ObjectsByClass:
if object in self.ObjectsByClass[Class]:
objClass = Class
break
for property, type_dict in property_dict.items():
if '{} {}'.format(objClass, property) in self.PublicationList:
for typeID, type in type_dict.items():
name = '{}.{}.{}'.format(object, property, typeID)
self.__publications[name] = h.helicsFederateRegisterGlobalTypePublication(
self.__PyDSSfederate,
name,
type['type'],
type['unit']
)
return
def __initCurrentResults(self, PptyName):
data = {}
if PptyName in Units:
if isinstance(Units[PptyName], dict):
for subset, unit in Units[PptyName].items():
data[subset] = {
'value': None,
'unit': Units[PptyName][subset],
'type': Types[PptyName]
}
else:
data['A'] = {
'value': None,
'unit': Units[PptyName],
'type': Types[PptyName]
}
else:
data['A'] = {
'value': None,
'unit': 'NA',
'type': 'double'
}
return data
def CreateListByClass(self):
for Class, Properties in self.ExportList.items():
if Class == 'Buses':
self.Results[Class] = {}
for PptyIndex, PptyName in enumerate(Properties):
if isinstance(PptyName, str):
self.Results[Class][PptyName] = {}
for BusName, BusObj in self.Buses.items():
if self.Buses[BusName].inVariableDict(PptyName):
self.Results[Class][PptyName][BusName] = []
if BusName not in self.CurrentResults:
self.CurrentResults[BusName] = {}
self.CurrentResults[BusName][PptyName] = self.__initCurrentResults(PptyName)
else:
if Class in self.ObjectsByClass:
self.Results[Class] = {}
for PptyIndex, PptyName in enumerate(Properties):
if isinstance(PptyName, str):
self.Results[Class][PptyName] = {}
for ElementName, ElmObj in self.ObjectsByClass[Class].items():
if self.ObjectsByClass[Class][ElementName].IsValidAttribute(PptyName):
self.Results[Class][PptyName][ElementName] = []
if ElementName not in self.CurrentResults:
self.CurrentResults[ElementName] = {}
self.CurrentResults[ElementName][PptyName] = self.__initCurrentResults(PptyName)
return
def CreateListByElement(self):
for Element, Properties in self.ExportList.items():
if Element in self.ObjectsByElement:
self.Results[Element] = {}
self.CurrentResults[Element] = {}
for PptyIndex, PptyName in enumerate(Properties):
if isinstance(PptyName, str):
if self.ObjectsByElement[Element].IsValidAttribute(PptyName):
self.Results[Element][PptyName] = []
self.CurrentResults[Element][PptyName] = self.__initCurrentResults(PptyName)
elif Element in self.Buses:
self.Results[Element] = {}
self.CurrentResults[Element] = {}
for PptyIndex, PptyName in enumerate(Properties):
if isinstance(PptyName, str):
if self.Buses[Element].inVariableDict(PptyName):
self.Results[Element][PptyName] = []
self.CurrentResults[Element][PptyName] = self.__initCurrentResults(PptyName)
return
def __parse_current_values(self, Element, Property, Values):
ans = self.CurrentResults[Element][Property]
for filter, data in ans.items():
if filter == 'A':
ans[filter]['value'] = Values
elif filter == 'E':
ans[filter]['value'] = Values[0::2]
elif filter == '0':
ans[filter]['value'] = Values[1::2]
if self.__Settings['Helics']['Co-simulation Mode']:
name = '{}.{}.{}'.format(Element, Property, filter)
if isinstance(ans[filter]['value'], list) and name in self.__publications:
h.helicsPublicationPublishVector(self.__publications[name], ans[filter]['value'])
elif isinstance(Values, float) and name in self.__publications:
h.helicsPublicationPublishDouble(self.__publications[name], ans[filter]['value'])
elif isinstance(Values, str) and name in self.__publications:
h.helicsPublicationPublishString(self.__publications[name], ans[filter]['value'])
elif isinstance(Values, bool) and name in self.__publications:
h.helicsPublicationPublishBoolean(self.__publications[name], ans[filter]['value'])
elif isinstance(Values, int) and name in self.__publications:
h.helicsPublicationPublishInteger(self.__publications[name], ans[filter]['value'])
self.CurrentResults[Element][Property] = ans
return
def InitializeDataStore(self, _, __):
pass
def UpdateResults(self):
if self.__Settings['Helics']['Co-simulation Mode']:
r_seconds = self.__dssDolver.GetTotalSeconds()
print('Time: ', r_seconds)
c_seconds = 0
while c_seconds < r_seconds:
c_seconds = h.helicsFederateRequestTime(self.__PyDSSfederate, r_seconds)
self.__DateTime.append(self.__dssDolver.GetDateTime())
self.__Frequency.append(self.__dssDolver.getFrequency())
self.__SimulationMode.append(self.__dssDolver.getMode())
if self.__Settings['Exports']['Export Mode'] == 'byElement':
for Element in self.Results.keys():
for Property in self.Results[Element].keys():
if '.' in Element:
value = self.ObjectsByElement[Element].GetValue(Property)
self.Results[Element][Property].append(value)
self.__parse_current_values(Element, Property, value)
else:
value = self.Buses[Element].GetVariable(Property)
self.Results[Element][Property].append(value)
self.__parse_current_values(Element, Property, value)
elif self.__Settings['Exports']['Export Mode'] == 'byClass':
for Class in self.Results.keys():
for Property in self.Results[Class].keys():
for Element in self.Results[Class][Property].keys():
if Class == 'Buses':
value = self.Buses[Element].GetVariable(Property)
self.Results[Class][Property][Element].append(value)
self.__parse_current_values(Element, Property, value)
else:
value = self.ObjectsByClass[Class][Element].GetValue(Property)
self.Results[Class][Property][Element].append(value)
self.__parse_current_values(Element, Property, value)
return
def ExportResults(self, fileprefix=''):
if self.__Settings['Exports']['Export Mode'] == 'byElement':
self.__ExportResultsByElements(fileprefix)
elif self.__Settings['Exports']['Export Mode'] == 'byClass':
self.__ExportResultsByClass(fileprefix)
self.__ExportEventLog()
def FlushData(self):
pass
def max_num_bytes(self):
return 0
def __ExportResultsByClass(self, fileprefix=''):
for Class in self.Results.keys():
for Property in self.Results[Class].keys():
Class_ElementDatasets = []
PptyLvlHeader = ''
for Element in self.Results[Class][Property].keys():
ElmLvlHeader = ''
if isinstance(self.Results[Class][Property][Element][0], list):
Data = np.array(self.Results[Class][Property][Element])
for i in range(len(self.Results[Class][Property][Element][0])):
if Property in self.metadata_info:
if i % 2 == 0 and 'E' in self.metadata_info[Property]:
ElmLvlHeader += '{} ph:{} [{}],'.format(Element, math.floor(i / 2) + 1,
self.metadata_info[Property]['E'])
elif i % 2 == 1 and 'O' in self.metadata_info[Property]:
ElmLvlHeader += '{} ph:{} [{}],'.format(Element, math.floor(i / 2) + 1,
self.metadata_info[Property]['O'])
else:
ElmLvlHeader += '{}-{} [{}],'.format(Element, i, self.metadata_info[Property])
else:
ElmLvlHeader += Element + '-' + str(i) + ','
else:
Data = np.transpose(np.array([self.Results[Class][Property][Element]]))
if Property in self.metadata_info:
ElmLvlHeader = '{} [{}],'.format(Element, self.metadata_info[Property])
else:
ElmLvlHeader = Element + ','
if self.__Settings['Exports']['Export Style'] == 'Separate files':
fname = '-'.join([Class, Property, Element, str(self.__StartDay), str(self.__EndDay) ,fileprefix])
columns = [x for x in ElmLvlHeader.split(',') if x != '']
tuples = list(zip(*[self.__DateTime, self.__Frequency, self.__SimulationMode]))
index = pd.MultiIndex.from_tuples(tuples, names=['timestamp', 'frequency', 'Simulation mode'])
df = pd.DataFrame(Data, index=index, columns=columns)
if self.__ExportFormat == "h5":
df.reset_index(inplace=True)
self.__ExportDataFrame(df, os.path.join(self.ExportFolder, fname))
elif self.__Settings['Exports']['Export Style'] == 'Single file':
Class_ElementDatasets.append(Data)
PptyLvlHeader += ElmLvlHeader
if self.__Settings['Exports']['Export Style'] == 'Single file':
assert Class_ElementDatasets
Dataset = Class_ElementDatasets[0]
if len(Class_ElementDatasets) > 1:
for D in Class_ElementDatasets[1:]:
Dataset = np.append(Dataset, D, axis=1)
columns = [x for x in PptyLvlHeader.split(',') if x != '']
tuples = list(zip(*[self.__DateTime, self.__Frequency, self.__SimulationMode]))
index = | pd.MultiIndex.from_tuples(tuples, names=['timestamp', 'frequency', 'Simulation mode']) | pandas.MultiIndex.from_tuples |
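# The export code above keys every row by (timestamp, frequency, mode). A small
# stand-alone sketch of that MultiIndex pattern with made-up values (plain
# pandas, independent of PyDSS):
def _example_multiindex_export():
    import datetime
    import numpy as np
    import pandas as pd

    timestamps = [datetime.datetime(2020, 1, 1, h) for h in range(3)]
    frequency = [60.0, 60.0, 60.0]
    mode = ["Snapshot"] * 3
    index = pd.MultiIndex.from_tuples(
        list(zip(timestamps, frequency, mode)),
        names=["timestamp", "frequency", "Simulation mode"],
    )
    return pd.DataFrame(np.zeros((3, 2)), index=index, columns=["a", "b"])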
import json
import pytest
import numpy as np
import pandas as pd
import scipy.spatial.distance as scipy_distance
from whatlies import Embedding, EmbeddingSet
from .common import validate_plot_general_properties
"""
*Guide*
Here are the plot's properties which could be checked (some of them may not be applicable
for a particular plot/test case):
- type: the type of plot; usually it's scatter plot with circle marks.
- data_field: the name of the field of chart data which is used for datapoints' coordinates.
- data: the position (i.e. coordinates) of datapoints in the plot.
- x_label: label of x-axis.
- y_label: label of y-axis.
- title: title of the plot.
- label_field: the name of the field of chart data which is used for annotating data points with text labels.
- label: the text labels used for annotation of datapoints.
- color_field: the name of the field of chart data which is used for coloring datapoints.
"""
@pytest.fixture
def embset():
names = ["red", "blue", "green", "yellow", "white"]
vectors = np.random.rand(5, 4) * 10 - 5
embeddings = [Embedding(name, vector) for name, vector in zip(names, vectors)]
return EmbeddingSet(*embeddings)
def test_default(embset):
p = embset.plot_interactive()
chart = json.loads(p.to_json())
props = {
"type": "circle",
"data_field": ["x_axis", "y_axis"],
"data": embset.to_X()[:, :2],
"x_label": "Dimension 0",
"y_label": "Dimension 1",
"title": "Dimension 0 vs. Dimension 1",
"label_field": "original",
"label": [v.orig for v in embset.embeddings.values()],
"color_field": "",
}
chart_data = | pd.DataFrame(chart["datasets"][chart["data"]["name"]]) | pandas.DataFrame |
# Title: Sensor Reading Output
# Description: Pulls sensor readings data from the databases
# and outputs to a CSV file for analysis
# Author: <NAME>
# Date: 30/03/2021
# Import libraries
#import sys
import pandas as pd
import numpy as np
import os
#import traceback
import datetime
import pyodbc
#import re
from app import dbConnect
#sql = "{CALL [dbo].[PROC_GET_ALL_DATA]}"
sql = "CALL [dbo].[PROC_GET_ALL_DATA], @OUT = @out OUTPUT;"
# CONNECT TO DB HERE
conn = dbConnect()
cursor = conn.cursor()
try:
# Execute the SQL statement with the parameters prepared
cursor.execute(sql)
# Close open database cursor
cursor.close()
except pyodbc.Error as e:
# Extract the error argument
sqlstate = e.args[1]
# Close cursor
cursor.close()
# Print error is one should occur and raise an exception
print("An error occurred executing stored procedure (noReturn): " + sqlstate)
print(e) # Testing
dataDF = | pd.DataFrame() | pandas.DataFrame |
import codecs
import math
import os
import re
import gensim
import jieba.posseg as jieba
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
# Return the feature word vectors
def getWordVecs(wordList, model):
name = []
vecs = []
for word in wordList:
word = word.replace('\n', '')
try:
if word in model: # the model has a vector representation for this word
name.append(word)
vecs.append(model[word])
except KeyError:
continue
a = pd.DataFrame(name, columns=['word'])
b = pd.DataFrame(np.array(vecs, dtype='float'))
return pd.concat([a, b], axis=1)
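# getWordVecs() only requires `model` to support `word in model` and
# `model[word]`. A tiny illustration with a plain dict standing in for the
# gensim model (vectors are made up and 4-dimensional instead of 400):
def _example_getWordVecs():
    toy_model = {
        "data": np.array([0.1, 0.2, 0.3, 0.4]),
        "science": np.array([0.5, 0.6, 0.7, 0.8]),
    }
    # "missing" is skipped because it is not in the model
    return getWordVecs(["data", "science", "missing"], toy_model)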
# Data preprocessing: tokenization, stop-word removal, POS filtering
def dataPrepos(text, stopkey):
text = re.sub(r"[^\u4e00-\u9fa5]", '', text) # 去除非中文词语
result = []
pos = ['n', 'nz', 'v', 'vd', 'vn', 'result', 'a', 'd'] # 定义选取的词性
seg = jieba.lcut(text) # 分词
for i in seg:
if i.word not in result and i.word not in stopkey and i.flag in pos: # 去重 + 去停用词 + 词性筛选
# print i.word
result.append(i.word)
if not result:
result.append("无")
return result
# Build candidate keyword word vectors from the data
def buildAllWordsVecs(data, stopkey, model):
idList, abstractList = data['_key'], data['post']
for index in range(len(idList)):
id = idList[index]
post_content = abstractList[index]
result = dataPrepos(post_content, stopkey) # preprocess the post text
# get word vectors for the candidate keywords
words = list(set(result)) # deduplicate to get the candidate keyword list
wordvecs = getWordVecs(words, model) # word-vector representation of the candidates
# write the word vectors to a csv file, 400 dimensions per word
data_vecs = pd.DataFrame(wordvecs)
data_vecs.to_csv('result/vecs/wordvecs_' + str(id) + '.csv', index=False)
print("document ", id, " well done.")
# Extract the topK keywords via K-means clustering of the word vectors
def getkeywords_kmeans(data, topK):
words = data["word"] # 词汇
vecs = data.iloc[:, 1:] # 向量表示
kmeans = KMeans(n_clusters=1, random_state=10).fit(vecs)
labels = kmeans.labels_ # 类别结果标签
labels = pd.DataFrame(labels, columns=['label'])
new_df = pd.concat([labels, vecs], axis=1)
df_count_type = new_df.groupby('label').size() # number of items per cluster
# print df_count_type
vec_center = kmeans.cluster_centers_ # cluster centers
# Compute distance (similarity) to the center using the Euclidean distance
distances = []
vec_words = np.array(vecs) # candidate keyword vectors, DataFrame to array
vec_center = vec_center[0] # center of the first (and only) cluster in this example
length = len(vec_center) # vector dimensionality
for index in range(len(vec_words)): # number of candidate keywords
cur_wordvec = vec_words[index] # vector of the current word
dis = 0 # accumulated distance
for index2 in range(length):
dis += (vec_center[index2] - cur_wordvec[index2]) * (vec_center[index2] - cur_wordvec[index2])
dis = math.sqrt(dis)
distances.append(dis)
distances = pd.DataFrame(distances, columns=['dis'])
result = pd.concat([words, labels, distances], axis=1) # join each word with its distance to the cluster center
result = result.sort_values(by="dis", ascending=True) # sort by distance, ascending
# take the topK highest-ranked words as the document keywords
wordlist = np.array(result['word']) # select the word column and convert to an array
word_split = [wordlist[x] for x in range(0, topK)] # take the first topK words
word_split = " ".join(word_split)
return word_split
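# getkeywords_kmeans() expects a frame whose first column is "word" and whose
# remaining columns are vector dimensions (the layout written by
# buildAllWordsVecs). A synthetic example with made-up 3-dimensional vectors:
def _example_getkeywords_kmeans():
    data = pd.DataFrame({
        "word": ["alpha", "beta", "gamma", "delta"],
        0: [0.1, 0.9, 0.2, 0.8],
        1: [0.2, 0.8, 0.1, 0.9],
        2: [0.3, 0.7, 0.3, 0.7],
    })
    return getkeywords_kmeans(data, topK=2)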
def word():
# Read the dataset
dataFile = 'data/full_data.csv'
data = | pd.read_csv(dataFile) | pandas.read_csv |
# Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from datetime import datetime
import numpy as np
import pandas as pd
from eland import (
Client,
DEFAULT_PROGRESS_REPORTING_NUM_ROWS,
elasticsearch_date_to_pandas_date,
)
from eland import FieldMappings
from eland import Index
from eland import Operations
from eland.filter import QueryFilter
class QueryCompiler:
"""
Some notes on what can and can not be mapped:
1. df.head(10)
/_search?size=10
2. df.tail(10)
/_search?size=10&sort=_doc:desc
+ post_process results (sort_index)
3. df[['OriginAirportID', 'AvgTicketPrice', 'Carrier']]
/_search
{ '_source': ['OriginAirportID', 'AvgTicketPrice', 'Carrier']}
4. df.drop(['1', '2'])
/_search
{'query': {'bool': {'must': [], 'must_not': [{'ids': {'values': ['1', '2']}}]}}, 'aggs': {}}
This doesn't work if size is set (e.g. head/tail) as we don't know in Elasticsearch if values '1' or '2' are
in the first/last n fields.
A way to mitigate this would be to post process this drop - TODO
"""
def __init__(
self,
client=None,
index_pattern=None,
display_names=None,
index_field=None,
to_copy=None,
):
# Implement copy as we don't deep copy the client
if to_copy is not None:
self._client = Client(to_copy._client)
self._index_pattern = to_copy._index_pattern
self._index = Index(self, to_copy._index.index_field)
self._operations = copy.deepcopy(to_copy._operations)
self._mappings = copy.deepcopy(to_copy._mappings)
else:
self._client = Client(client)
self._index_pattern = index_pattern
# Get and persist mappings, this allows us to correctly
# map returned types from Elasticsearch to pandas datatypes
self._mappings = FieldMappings(
client=self._client,
index_pattern=self._index_pattern,
display_names=display_names,
)
self._index = Index(self, index_field)
self._operations = Operations()
@property
def index(self):
return self._index
@property
def columns(self):
columns = self._mappings.display_names
return pd.Index(columns)
def _get_display_names(self):
display_names = self._mappings.display_names
return pd.Index(display_names)
def _set_display_names(self, display_names):
self._mappings.display_names = display_names
def get_field_names(self, include_scripted_fields):
return self._mappings.get_field_names(include_scripted_fields)
def add_scripted_field(self, scripted_field_name, display_name, pd_dtype):
result = self.copy()
self._mappings.add_scripted_field(scripted_field_name, display_name, pd_dtype)
return result
@property
def dtypes(self):
return self._mappings.dtypes()
# END Index, columns, and dtypes objects
def _es_results_to_pandas(self, results, batch_size=None, show_progress=False):
"""
Parameters
----------
results: dict
Elasticsearch results from self.client.search
Returns
-------
df: pandas.DataFrame
_source values extracted from results and mapped to pandas DataFrame
dtypes are mapped via Mapping object
Notes
-----
Fields containing lists in Elasticsearch don't map easily to pandas.DataFrame
For example, an index with mapping:
```
"mappings" : {
"properties" : {
"group" : {
"type" : "keyword"
},
"user" : {
"type" : "nested",
"properties" : {
"first" : {
"type" : "keyword"
},
"last" : {
"type" : "keyword"
}
}
}
}
}
```
Adding a document:
```
"_source" : {
"group" : "amsterdam",
"user" : [
{
"first" : "John",
"last" : "Smith"
},
{
"first" : "Alice",
"last" : "White"
}
]
}
```
(https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html)
this would be transformed internally (in Elasticsearch) into a document that looks more like this:
```
{
"group" : "amsterdam",
"user.first" : [ "alice", "john" ],
"user.last" : [ "smith", "white" ]
}
```
When mapping this a pandas data frame we mimic this transformation.
Similarly, if a list is added to Elasticsearch:
```
PUT my_index/_doc/1
{
"list" : [
0, 1, 2
]
}
```
The mapping is:
```
"mappings" : {
"properties" : {
"user" : {
"type" : "long"
}
}
}
```
TODO - explain how lists are handled
(https://www.elastic.co/guide/en/elasticsearch/reference/current/array.html)
TODO - an option here is to use Elasticsearch's multi-field matching instead of pandas treatment of lists
(which isn't great)
NOTE - using lists like this is generally not a good way to use this API
"""
partial_result = False
if results is None:
return partial_result, self._empty_pd_ef()
# This is one of the most performance critical areas of eland, and it repeatedly calls
# self._mappings.field_name_pd_dtype and self._mappings.date_field_format
# therefore create a simple cache for this data
field_mapping_cache = FieldMappingCache(self._mappings)
rows = []
index = []
if isinstance(results, dict):
iterator = results["hits"]["hits"]
if batch_size is not None:
raise NotImplementedError(
"Can not specify batch_size with dict results"
)
else:
iterator = results
i = 0
for hit in iterator:
i = i + 1
if "_source" in hit:
row = hit["_source"]
else:
row = {}
# script_fields appear in 'fields'
if "fields" in hit:
fields = hit["fields"]
for key, value in fields.items():
row[key] = value
# get index value - can be _id or can be field value in source
if self._index.is_source_field:
index_field = row[self._index.index_field]
else:
index_field = hit[self._index.index_field]
index.append(index_field)
# flatten row to map correctly to 2D DataFrame
rows.append(self._flatten_dict(row, field_mapping_cache))
if batch_size is not None:
if i >= batch_size:
partial_result = True
break
if show_progress:
if i % DEFAULT_PROGRESS_REPORTING_NUM_ROWS == 0:
print(f"{datetime.now()}: read {i} rows")
# Create pandas DataFrame
df = pd.DataFrame(data=rows, index=index)
# _source may not contain all field_names in the mapping
# therefore, fill in missing field_names
# (note this returns self.field_names NOT IN df.columns)
missing_field_names = list(
set(self.get_field_names(include_scripted_fields=True)) - set(df.columns)
)
for missing in missing_field_names:
pd_dtype = self._mappings.field_name_pd_dtype(missing)
df[missing] = pd.Series(dtype=pd_dtype)
# Rename columns
df.rename(columns=self._mappings.get_renames(), inplace=True)
# Sort columns in mapping order
if len(self.columns) > 1:
df = df[self.columns]
if show_progress:
print(f"{datetime.now()}: read {i} rows")
return partial_result, df
def _flatten_dict(self, y, field_mapping_cache):
out = {}
def flatten(x, name=""):
# We flatten into source fields e.g. if type=geo_point
# location: {lat=52.38, lon=4.90}
if name == "":
is_source_field = False
pd_dtype = "object"
else:
try:
pd_dtype = field_mapping_cache.field_name_pd_dtype(name[:-1])
is_source_field = True
except KeyError:
is_source_field = False
pd_dtype = "object"
if not is_source_field and type(x) is dict:
for a in x:
flatten(x[a], name + a + ".")
elif not is_source_field and type(x) is list:
for a in x:
flatten(a, name)
elif is_source_field: # only print source fields from mappings
# (TODO - not so efficient for large number of fields and filtered mapping)
field_name = name[:-1]
# Coerce types - for now just datetime
if pd_dtype == "datetime64[ns]":
x = elasticsearch_date_to_pandas_date(
x, field_mapping_cache.date_field_format(field_name)
)
# Elasticsearch can have multiple values for a field. These are represented as lists, so
# create lists for this pivot (see notes above)
if field_name in out:
if type(out[field_name]) is not list:
field_as_list = [out[field_name]]
out[field_name] = field_as_list
out[field_name].append(x)
else:
out[field_name] = x
else:
# Script fields end up here
# Elasticsearch returns 'Infinity' as a string for np.inf values.
# Map this to a numeric value to avoid this whole Series being classed as an object
# TODO - create a lookup for script fields and dtypes to only map 'Infinity'
# if the field is numeric. This implementation will currently map
# any script field with "Infinity" as a string to np.inf
if x == "Infinity":
out[name[:-1]] = np.inf
else:
out[name[:-1]] = x
flatten(y)
return out
def _index_count(self):
"""
Returns
-------
index_count: int
Count of docs where index_field exists
"""
return self._operations.index_count(self, self.index.index_field)
def _index_matches_count(self, items):
"""
Returns
-------
index_count: int
Count of docs where items exist
"""
return self._operations.index_matches_count(self, self.index.index_field, items)
def _empty_pd_ef(self):
# Return an empty dataframe with correct columns and dtypes
df = | pd.DataFrame() | pandas.DataFrame |
from xlsx_writer import dataframes_to_xlsx
import os
import pandas as pd
import tempfile
def test_datasets_to_xlsx():
output_file_name = 'sample_test.xlsx'
tmp_output_dir = tempfile.TemporaryDirectory(dir='.')
output_file = os.path.join(tmp_output_dir.name, output_file_name)
df1 = | pd.DataFrame({'dfId': [1], 'gender': ['M'], 'birthdate': ['1953/10/5']}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Tests for Results.predict
"""
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose, assert_equal
import pandas.util.testing as pdt
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
class CheckPredictReturns(object):
def test_2d(self):
res = self.res
data = self.data
fitted = res.fittedvalues.iloc[1:10:2]
pred = res.predict(data.iloc[1:10:2])
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# plain dict
xd = dict(zip(data.columns, data.iloc[1:10:2].values.T))
pred = res.predict(xd)
assert_equal(pred.index, np.arange(len(pred)))
assert_allclose(pred.values, fitted.values, rtol=1e-13)
def test_1d(self):
# one observation
res = self.res
data = self.data
pred = res.predict(data.iloc[:1])
pdt.assert_index_equal(pred.index, data.iloc[:1].index)
assert_allclose(pred.values, res.fittedvalues[0], rtol=1e-13)
fittedm = res.fittedvalues.mean()
xmean = data.mean()
pred = res.predict(xmean.to_frame().T)
assert_equal(pred.index, np.arange(1))
assert_allclose(pred, fittedm, rtol=1e-13)
# Series
pred = res.predict(data.mean())
assert_equal(pred.index, np.arange(1))
assert_allclose(pred.values, fittedm, rtol=1e-13)
# dict with scalar value (is plain dict)
# Note: this warns about dropped nan, even though there are None -FIXED
pred = res.predict(data.mean().to_dict())
assert_equal(pred.index, np.arange(1))
assert_allclose(pred.values, fittedm, rtol=1e-13)
def test_nopatsy(self):
res = self.res
data = self.data
fitted = res.fittedvalues.iloc[1:10:2]
# plain numpy array
pred = res.predict(res.model.exog[1:10:2], transform=False)
assert_allclose(pred, fitted.values, rtol=1e-13)
# pandas DataFrame
x = pd.DataFrame(res.model.exog[1:10:2],
index = data.index[1:10:2],
columns=res.model.exog_names)
pred = res.predict(x)
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# one observation - 1-D
pred = res.predict(res.model.exog[1], transform=False)
assert_allclose(pred, fitted.values[0], rtol=1e-13)
# one observation - pd.Series
pred = res.predict(x.iloc[0])
pdt.assert_index_equal(pred.index, fitted.index[:1])
assert_allclose(pred.values[0], fitted.values[0], rtol=1e-13)
class TestPredictOLS(CheckPredictReturns):
@classmethod
def setup_class(cls):
nobs = 30
np.random.seed(987128)
x = np.random.randn(nobs, 3)
y = x.sum(1) + np.random.randn(nobs)
index = ['obs%02d' % i for i in range(nobs)]
# add one extra column to check that it doesn't matter
cls.data = pd.DataFrame(np.round(np.column_stack((y, x)), 4),
columns='y var1 var2 var3'.split(),
index=index)
cls.res = OLS.from_formula('y ~ var1 + var2', data=cls.data).fit()
class TestPredictGLM(CheckPredictReturns):
@classmethod
def setup_class(cls):
nobs = 30
np.random.seed(987128)
x = np.random.randn(nobs, 3)
y = x.sum(1) + np.random.randn(nobs)
index = ['obs%02d' % i for i in range(nobs)]
# add one extra column to check that it doesn't matter
cls.data = pd.DataFrame(np.round(np.column_stack((y, x)), 4),
columns='y var1 var2 var3'.split(),
index=index)
cls.res = GLM.from_formula('y ~ var1 + var2', data=cls.data).fit()
def test_predict_offset(self):
res = self.res
data = self.data
fitted = res.fittedvalues.iloc[1:10:2]
offset = np.arange(len(fitted))
fitted = fitted + offset
pred = res.predict(data.iloc[1:10:2], offset=offset)
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# plain dict
xd = dict(zip(data.columns, data.iloc[1:10:2].values.T))
pred = res.predict(xd, offset=offset)
assert_equal(pred.index, np.arange(len(pred)))
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# offset as pandas.Series
data2 = data.iloc[1:10:2].copy()
data2['offset'] = offset
pred = res.predict(data2, offset=data2['offset'])
pdt.assert_index_equal(pred.index, fitted.index)
assert_allclose(pred.values, fitted.values, rtol=1e-13)
# check nan in exog is ok, preserves index matching offset length
data2 = data.iloc[1:10:2].copy()
data2['offset'] = offset
data2.iloc[0, 1] = np.nan
pred = res.predict(data2, offset=data2['offset'])
| pdt.assert_index_equal(pred.index, fitted.index) | pandas.util.testing.assert_index_equal |
##Script to consolidate output from corrected MIGMAP alignments into consensus CDR3s for cells, B Cells
#<NAME>, 5/24/15
##---NOTES----
#1. Run in the directory of the corrected aligned reads, i.e. the tab delimited corrected aligned files
#!/usr/bin/python # 2.7.14
import os
import collections
import glob
import pandas as pd # 0.22
import numpy as np # 1.15.2
#get the current directory, and change to it
curdir = os.getcwd()
##--Set up dataframe to fill with consensus from each cell, also counter to act as the row index
df = pd.DataFrame()
#thresholds for calling consensus, i.e. if the top alignment is 2x greater (in freq) than the next, then it's the consensus; plus a minimum count threshold
fthresh = 2
cthresh = 25
cols_to_keep = ['Sample', 'chain', 'v', 'd', 'j', 'cdr3aa', 'cdr3nt', 'freq', 'count']
##--Open each alignment tab delimited file and look for the consensus TRA and TRB
for alignment in glob.glob(curdir + "/*corr_align.txt"):
print('Working on ' + alignment.replace(curdir+'/','').lstrip('/'))
#Set booleans for having recorded IGH, IGK and IGL, and variables containing the consensus chains
IGH = False
IGK = False
IGL = False
IGH_cons = pd.DataFrame()
IGK_cons = pd.DataFrame()
IGL_cons = pd.DataFrame()
#no need for with structure when using read_table, so just grab all of the data and put into dataframe
table = pd.read_table(alignment)
#FIRST CHECK TO SEE IF WE MEASURED IGH, IGK, IGL IN THIS CELL, if so set them TRUE and set no consensus
if (table['v'].str.contains('IGHV').sum(numeric_only=False) == 0):
IGH = True
IGH_cons = pd.Series(['No IGH measured'], index=['v'])
if (table['v'].str.contains('IGKV').sum(numeric_only=False) == 0):
IGK = True
IGK_cons = pd.Series(['No IGK measured'], index=['v'])
if (table['v'].str.contains('IGLV').sum(numeric_only=False) == 0):
IGL = True
IGL_cons = pd.Series(['No IGL measured'], index=['v'])
#while loop to keep searching for consensus until IGH, IGK and IGL have all been resolved
while (IGH == False or IGK == False or IGL == False):
#Now look for the IGH alignment (similar to code in collect_migmap_results_TCR.py)
if (IGH == False):
index_IGH = table['v'].str.contains('IGHV')
table_IGH = table[index_IGH].reset_index(drop=True)
table_IGH = table_IGH.replace('\?', '0', regex=True)
groupby_cdr3aa = table_IGH[['freq','count']].groupby(table_IGH['cdr3aa'])
groupby_cdr3aa_table = groupby_cdr3aa.sum().sort_values('count', ascending=False)
unique_cdr3 = len(groupby_cdr3aa_table.index)
##---Determine the consensus IGH gene
#we have 2 or more cdr3s measured
if (unique_cdr3 > 1):
if (groupby_cdr3aa_table.ix[0,'count'] >= fthresh*groupby_cdr3aa_table.ix[1,'count'] and groupby_cdr3aa_table.ix[0,'count'] >= cthresh):
consensus_cdr3 = groupby_cdr3aa_table.index.values[0]
IGH_cons = pd.Series(table_IGH.ix[table_IGH[table_IGH['cdr3aa'].str.contains(consensus_cdr3) == True].index.values[0]])
IGH_cons.ix['freq'] = groupby_cdr3aa_table.ix[0,'freq']
IGH_cons.ix['count'] = groupby_cdr3aa_table.ix[0,'count']
IGH = True
else:
IGH_cons = pd.Series(['No Consensus'], index=['v'])
IGH = True
#we have 1 cdr3 measured
elif (unique_cdr3 == 1):
if (groupby_cdr3aa_table.ix[0,'count'] >= cthresh):
consensus_cdr3 = groupby_cdr3aa_table.index.values[0]
IGH_cons = pd.Series(table_IGH.ix[table_IGH[table_IGH['cdr3aa'].str.contains(consensus_cdr3) == True].index.values[0]])
IGH_cons.ix['freq'] = groupby_cdr3aa_table.ix[0,'freq']
IGH_cons.ix['count'] = groupby_cdr3aa_table.ix[0,'count']
IGH = True
else:
IGH_cons = pd.Series(['No Consensus'], index=['v'])
IGH = True
#IGL Alignment
if (IGL == False):
index_IGL = table['v'].str.contains('IGLV')
table_IGL = table[index_IGL].reset_index(drop=True)
table_IGL = table_IGL.replace('\?', '0', regex=True)
groupby_cdr3aa = table_IGL[['freq','count']].groupby(table_IGL['cdr3aa'])
groupby_cdr3aa_table = groupby_cdr3aa.sum().sort_values('count', ascending=False)
unique_cdr3 = len(groupby_cdr3aa_table.index)
##---Determine the consensus IGL gene
#we have 2 or more cdr3s measured
if (unique_cdr3 > 1):
if (groupby_cdr3aa_table.ix[0,'count'] >= fthresh*groupby_cdr3aa_table.ix[1,'count'] and groupby_cdr3aa_table.ix[0,'count'] >= cthresh):
consensus_cdr3 = groupby_cdr3aa_table.index.values[0]
IGL_cons = pd.Series(table_IGL.ix[table_IGL[table_IGL['cdr3aa'].str.contains(consensus_cdr3) == True].index.values[0]])
IGL_cons.ix['freq'] = groupby_cdr3aa_table.ix[0,'freq']
IGL_cons.ix['count'] = groupby_cdr3aa_table.ix[0,'count']
IGL = True
else:
IGL_cons = pd.Series(['No Consensus'], index=['v'])
IGL = True
#we have 1 cdr3 measured
elif (unique_cdr3 == 1):
if (groupby_cdr3aa_table.ix[0,'count'] >= cthresh):
consensus_cdr3 = groupby_cdr3aa_table.index.values[0]
IGL_cons = pd.Series(table_IGL.ix[table_IGL[table_IGL['cdr3aa'].str.contains(consensus_cdr3) == True].index.values[0]])
IGL_cons.ix['freq'] = groupby_cdr3aa_table.ix[0,'freq']
IGL_cons.ix['count'] = groupby_cdr3aa_table.ix[0,'count']
IGL = True
else:
IGL_cons = pd.Series(['No Consensus'], index=['v'])
IGL = True
#IGK Alignment
if (IGK == False):
index_IGK = table['v'].str.contains('IGKV')
table_IGK = table[index_IGK].reset_index(drop=True)
table_IGK = table_IGK.replace('\?', '0', regex=True)
groupby_cdr3aa = table_IGK[['freq','count']].groupby(table_IGK['cdr3aa'])
groupby_cdr3aa_table = groupby_cdr3aa.sum().sort_values('count', ascending=False)
unique_cdr3 = len(groupby_cdr3aa_table.index)
##---Determine the consensus IGK gene
#we have 2 or more cdr3s measured
if (unique_cdr3 > 1):
if (groupby_cdr3aa_table.ix[0,'count'] >= fthresh*groupby_cdr3aa_table.ix[1,'count'] and groupby_cdr3aa_table.ix[0,'count'] >= cthresh):
consensus_cdr3 = groupby_cdr3aa_table.index.values[0]
IGK_cons = pd.Series(table_IGK.ix[table_IGK[table_IGK['cdr3aa'].str.contains(consensus_cdr3) == True].index.values[0]])
IGK_cons.ix['freq'] = groupby_cdr3aa_table.ix[0,'freq']
IGK_cons.ix['count'] = groupby_cdr3aa_table.ix[0,'count']
IGK = True
else:
IGK_cons = | pd.Series(['No Consensus'], index=['v']) | pandas.Series |
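# --- Editor's sketch (not part of the original script) -----------------------
# The IGH/IGL/IGK blocks above repeat the same consensus logic; a hedged
# refactor of that pattern into a single helper might look like the following.
# It assumes the same columns ('v', 'cdr3aa', 'freq', 'count') and the same
# fthresh/cthresh thresholds used above, and uses .iloc instead of the
# now-removed pandas .ix accessor.
import pandas as pd

def consensus_for_locus(table, v_prefix, fthresh, cthresh):
    """Return the consensus row for one locus, or a 'No Consensus' stub."""
    sub = table[table['v'].str.contains(v_prefix)].reset_index(drop=True)
    sub = sub.replace(r'\?', '0', regex=True)
    grouped = (sub[['freq', 'count']]
               .groupby(sub['cdr3aa'])
               .sum()
               .sort_values('count', ascending=False))
    if len(grouped.index) == 0:
        return pd.Series(['No Consensus'], index=['v'])
    top_count = grouped.iloc[0]['count']
    dominant = (len(grouped.index) == 1
                or top_count >= fthresh * grouped.iloc[1]['count'])
    if dominant and top_count >= cthresh:
        consensus_cdr3 = grouped.index.values[0]
        cons = sub[sub['cdr3aa'].str.contains(consensus_cdr3)].iloc[0].copy()
        cons['freq'] = grouped.iloc[0]['freq']
        cons['count'] = top_count
        return cons
    return pd.Series(['No Consensus'], index=['v'])
# -----------------------------------------------------------------------------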
#!/usr/bin/env python
__author__ = "<NAME>"
import logging
import os
import re
import pandas
import numpy
import gzip
from timeit import default_timer as timer
from pyarrow import parquet as pq
from genomic_tools_lib import Logging, Utilities
from genomic_tools_lib.data_management import TextFileTools
from genomic_tools_lib.miscellaneous import matrices, PandasHelpers
from genomic_tools_lib.miscellaneous import Genomics
from genomic_tools_lib.file_formats import Parquet
class Context:
def __init__(self, args):
self.args = args
self.file_map = None
self.vmf = None
self.of = None
self.regions = None
def get_genotype_file(self, chromosome):
logging.info("Opening genotype for chromosome %d", chromosome)
g = pq.ParquetFile(self.file_map[chromosome])
return g
def __enter__(self):
logging.info("initializing resources")
logging.info("Loading regions")
regions = load_regions(self.args.region_file, self.args.chromosome)
if args.sub_batches and args.sub_batch is not None:
logging.log(9, "Selecting target regions from sub-batches")
regions = PandasHelpers.sub_batch(regions, args.sub_batches, args.sub_batch)
self.regions = regions
logging.info("Opening variants metadata")
self.vmf = pq.ParquetFile(args.parquet_genotype_metadata)
logging.info("Creating destination")
if args.text_output:
if os.path.exists(args.text_output):
raise RuntimeError("Output exists. Nope.")
Utilities.ensure_requisite_folders(args.text_output)
self.of = TextFileTools.TextDataSink(args.text_output, [("region", "id1", "id2", "value")])
self.of.initialize()
elif args.text_output_folder:
Utilities.maybe_create_folder(args.text_output_folder)
else:
raise RuntimeError("Unrecognized output specification")
if (args.parquet_genotype_folder and args.parquet_genotype_pattern):
self.file_map = get_file_map(args)
else:
raise RuntimeError("Unrecognized genotype specification")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
logging.info("finalizing resources")
if self.of:
self.of.finalize()
def sink(self, cov, ids, region):
logging.log(9, "Serializing covariance")
_region = "{}_{}_{}_{}".format(region.name, region.chr, region.start, region.stop)
if args.text_output:
if args.dapg_output:
raise RuntimeError("Not supported for this option")
else:
cov = matrices._flatten_matrix_data([(_region, ids, cov)])
self.of.sink(cov)
elif args.text_output_folder:
if args.dapg_output:
f = os.path.join(args.text_output_folder, _region) + ".txt.gz"
with gzip.open(f, "w") as o:
for i in range(0, cov.shape[0]):
l = "\t".join(["{:0.4f}".format(x) for x in cov[i]]) + "\n"
o.write(l.encode())
id = os.path.join(args.text_output_folder, _region) + ".id.txt.gz"
with gzip.open(id, "w") as o:
l = "\n".join(ids).encode()
o.write(l)
else:
cov = matrices._flatten_matrix_data_2(ids, cov)
cov = | pandas.DataFrame(cov) | pandas.DataFrame |
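# --- Editor's illustration (hypothetical data, not part of the script above) --
# The sink() method above receives a square covariance matrix `cov` plus the
# matching variant ids. Given a dosage DataFrame with one column per variant,
# such a matrix can be computed directly with numpy:
import numpy
import pandas

_dosages = pandas.DataFrame(
    numpy.random.default_rng(0).integers(0, 3, size=(100, 4)),
    columns=["var_1", "var_2", "var_3", "var_4"])
_cov = numpy.cov(_dosages.values, rowvar=False)          # 4 x 4 matrix
_cov_frame = pandas.DataFrame(_cov, index=_dosages.columns,
                              columns=_dosages.columns)
# -----------------------------------------------------------------------------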
"""Functions to noise components based on selected strategey."""
import numpy as np
import pandas as pd
from .load_confounds_utils import (_add_suffix,
_check_params,
_find_confounds)
from .load_confounds_compcor import _find_compcor
from .load_confounds_scrub import _optimize_scrub
def _load_motion(confounds_raw, motion):
"""Load the motion regressors."""
motion_params = _add_suffix(
["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"],
motion,
)
_check_params(confounds_raw, motion_params)
return confounds_raw[motion_params]
def _load_high_pass(confounds_raw):
"""Load the high pass filter regressors."""
high_pass_params = _find_confounds(confounds_raw, ["cosine"])
return confounds_raw[high_pass_params]
def _load_wm_csf(confounds_raw, wm_csf):
"""Load the regressors derived from the white matter and CSF masks."""
wm_csf_params = _add_suffix(["csf", "white_matter"], wm_csf)
_check_params(confounds_raw, wm_csf_params)
return confounds_raw[wm_csf_params]
def _load_global_signal(confounds_raw, global_signal):
"""Load the regressors derived from the global signal."""
global_params = _add_suffix(["global_signal"], global_signal)
_check_params(confounds_raw, global_params)
return confounds_raw[global_params]
def _load_compcor(confounds_raw, meta_json, compcor, n_compcor):
"""Load compcor regressors."""
compcor_cols = _find_compcor(
meta_json, compcor, n_compcor
)
_check_params(confounds_raw, compcor_cols)
return confounds_raw[compcor_cols]
def _load_ica_aroma(confounds_raw, ica_aroma):
"""Load the ICA-AROMA regressors."""
if ica_aroma == "full":
return pd.DataFrame()
elif ica_aroma == "basic":
ica_aroma_params = _find_confounds(confounds_raw, ["aroma"])
return confounds_raw[ica_aroma_params]
else:
raise ValueError(
"Please select an option when using ICA-AROMA strategy."
f"Current input: {ica_aroma}"
)
def _load_scrub(confounds_raw, scrub, fd_threshold, std_dvars_threshold):
"""Remove volumes if FD and/or DVARS exceeds threshold."""
n_scans = len(confounds_raw)
# Get indices of fd outliers
fd_outliers_index = np.where(
confounds_raw["framewise_displacement"] > fd_threshold
)[0]
dvars_outliers_index = np.where(
confounds_raw["std_dvars"] > std_dvars_threshold
)[0]
motion_outliers_index = np.sort(
np.unique(np.concatenate((fd_outliers_index, dvars_outliers_index)))
)
# when motion outliers were detected, remove segments with too few
# timeframes if desired
if scrub > 0 and len(motion_outliers_index) > 0:
motion_outliers_index = _optimize_scrub(motion_outliers_index, n_scans,
scrub)
# Make one-hot encoded motion outlier regressors
motion_outlier_regressors = pd.DataFrame(
np.transpose(np.eye(n_scans)[motion_outliers_index]).astype(int)
)
column_names = [
"motion_outlier_" + str(num)
for num in range(np.shape(motion_outlier_regressors)[1])
]
motion_outlier_regressors.columns = column_names
return motion_outlier_regressors
def _load_non_steady_state(confounds_raw):
"""Find non steady state regressors."""
nss_outliers = _find_confounds(confounds_raw, ["non_steady_state"])
if nss_outliers:
return confounds_raw[nss_outliers]
else:
return | pd.DataFrame() | pandas.DataFrame |
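# --- Editor's worked example (hypothetical inputs, separate from the module
# above): the one-hot motion-outlier encoding built in _load_scrub picks rows
# of an identity matrix at the outlier indices, giving one indicator column
# per censored volume.
import numpy as np
import pandas as pd

_n_scans = 6
_outliers = np.array([1, 4])
_regressors = pd.DataFrame(
    np.transpose(np.eye(_n_scans)[_outliers]).astype(int),
    columns=["motion_outlier_0", "motion_outlier_1"])
# _regressors has shape (6, 2); motion_outlier_0 is 1 only at scan 1 and
# motion_outlier_1 is 1 only at scan 4, mirroring the code above.
# -----------------------------------------------------------------------------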
"""Version the GTFS by changing all the Id of each table."""
import logging
import pandas as pd
import utilities.pandas_tools as pt
from utilities.decorator import logged
from mixer.glogger import logger
class Model(object):
"""Transform Id of each entitie into sha1."""
def __init__(self, dict_df_mapped):
"""Constructor."""
self.dict_df_mapped = dict_df_mapped
self.set_agency_id
self.set_gtfs_id
@property
def set_agency_id(self):
"""Extract the id agency."""
self.agency_id = self.dict_df_mapped["Agency"]["Id"].iloc[0]
@property
def set_gtfs_id(self):
"""Extract gtfs id."""
self.gtfs_id = self.dict_df_mapped["Gtfs"]["Id"].iloc[0]
def gen_args(self, _id, seq, cols):
"""Gen args for mp."""
return [
(
uid,
_id,
seq,
cols
)
for uid in self.df[_id].unique()
]
def gen_sha1(self, uid, _id, seq, cols):
"""Gen sha1 based on a sequence of ."""
data = self.df[self.df[_id] == uid]
df_uid = pt.hash_frame(data.sort_values(by=seq)[cols])
return [uid, df_uid]
def mp_sha(self, args):
"""Mp for gen sha1."""
return self.gen_sha1(*args)
@logged(level=logging.INFO, name=logger)
def gen_new_id_on_seq(self, table, cols, seq, _id):
"""Generate a sha1 for a sequence of stops."""
logger.log(logging.INFO, "generate sha1 for {}".format(table))
df = self.dict_df_mapped[table].copy()
self.df = df
lst = []
        # To multiprocess the sha1 generation, use the commented block below;
        # otherwise keep the plain groupby loop that follows it.
# import multiprocessing as mp
# from tqdm import tqdm
# l_args = self.gen_args(_id, seq, cols)
# pool = mp.Pool()
# ln = len(l_args)
# for sha in tqdm(pool.imap_unordered(self.mp_sha, l_args), total=ln):
# lst.append(sha)
# pool.close()
for sdl_id, uid in df.groupby(_id):
df_uid = pt.hash_frame(uid.sort_values(by=seq)[cols])
lst.append([sdl_id, df_uid])
on_cols = ["Id", table + "ScheduleId"]
return pd.DataFrame(lst, columns=on_cols)
@logged(level=logging.INFO, name=logger)
def gen_new_id_on_cols(self, table, cols, _id):
"""Generate a sha1 for a serie."""
logger.log(logging.INFO, "generate sha1 for {}".format(table))
df = self.dict_df_mapped[table].copy()
sdl_id = table + "ScheduleId"
df[sdl_id] = pt.sha1_for_named_columns(df, cols)
on_cols = [_id, sdl_id]
return df[on_cols]
@logged(level=logging.INFO, name=logger)
def new_id(self):
"""Generate the table of correspondance for new id."""
vstops = self.gen_new_id_on_cols(
"Stop", ["Latitude", "Longitude"], "Id")
vroutes = self.gen_new_id_on_cols(
"Route", ["ShortName", "LongName"], "Id")
vshapes = self.gen_new_id_on_seq(
"Shape", ["Latitude", "Longitude", "GtfsId"],
"ShapeSequence", "Id")
vtrips = self.gen_new_id_on_seq(
"StopTime", ["ArrivalTimeSeconds", "StopId", "GtfsId", "ServiceId"],
"StopSequence", "TripId")
vtrips = vtrips.rename(
columns={"StopTimeScheduleId": "TripScheduleId"})
return vstops, vroutes, vshapes, vtrips
@logged(level=logging.INFO, name=logger)
def merge_stops(self, vstops):
"""Change stop_id to new sha1."""
stops = self.dict_df_mapped["Stop"].copy()
stops = pd.merge(stops, vstops, on="Id")
def merge_parent_station(stops, vstops):
"""Change the parent stop id to sha1."""
vstops = vstops.copy()
vstops = vstops.rename(columns={
"Id": "ParentStopId", "StopScheduleId": "ParentScheduleId"})
stops = pd.merge(stops, vstops, on="ParentStopId", how="left")
return stops
stops = merge_parent_station(stops, vstops)
stops["Id"] = stops["StopScheduleId"]
stops["ParentStopId"] = stops["ParentScheduleId"]
stops = pt.change_nan_value(stops, None)
return stops.drop_duplicates(subset="Id")
@logged(level=logging.INFO, name=logger)
def merge_routes(self, vroutes):
"""Change the route_id to new sha1."""
routes = self.dict_df_mapped["Route"].copy()
routes = pd.merge(routes, vroutes, on="Id")
routes["Id"] = routes["RouteScheduleId"]
routes["AgencyId"] = pt.change_nan_value(routes["AgencyId"], self.agency_id)
return routes.drop_duplicates(subset="Id")
@logged(level=logging.INFO, name=logger)
def merge_stoptimes(self, vtrips, vstops):
"""Change the trip_id and stop_id with the new sha1."""
stimes = self.dict_df_mapped["StopTime"].copy()
vtrips = vtrips.rename(columns={"Id": "TripId"})
vstops = vstops.rename(columns={"Id": "StopId"})
stimes = pd.merge(stimes, vtrips, on="TripId")
stimes = pd.merge(stimes, vstops, on="StopId")
stimes["TripId"] = stimes["TripScheduleId"]
stimes["StopId"] = stimes["StopScheduleId"]
return stimes
@logged(level=logging.INFO, name=logger)
def merge_shapes(self, vshapes):
"""Change the shape_id to new sha1."""
shapes = self.dict_df_mapped["Shape"].copy()
shapes = pd.merge(shapes, vshapes, on="Id")
shapes["Id"] = shapes["ShapeScheduleId"]
shapes = pt.change_nan_value(shapes, None)
return shapes
@logged(level=logging.INFO, name=logger)
def merge_trips(self, vtrips, vroutes, vshapes):
"""Change the trip_id and route_id and shape_id to sha1."""
trips = self.dict_df_mapped["Trip"].copy()
vroutes = vroutes.rename(columns={"Id": "RouteId"})
vshapes = vshapes.rename(columns={"Id": "ShapeId"})
trips = pd.merge(trips, vtrips, on="Id", how="left")
trips = pd.merge(trips, vroutes, on="RouteId")
trips = pd.merge(trips, vshapes, on="ShapeId")
trips["Id"] = trips["TripScheduleId"]
trips["RouteId"] = trips["RouteScheduleId"]
trips["ShapeId"] = trips["ShapeScheduleId"]
return trips
def merge_date_trips(self, vtrips):
"""Change the trip_id in trip2date."""
ttd = self.dict_df_mapped["TripToDate"].copy()
trips = vtrips[["Id", "TripScheduleId"]]
ttd = pd.merge(ttd, trips, left_on="TripId", right_on="Id")
ttd["TripId"] = ttd["TripScheduleId"]
return ttd
def set_trip_gtfs(self, vtrips, trips):
"""Create Trip2Gtfs."""
vtrips["GtfsId"] = self.gtfs_id
trips = trips[["Headsign", "ShortName", "TripScheduleId"]]
vtrips = pd.merge(vtrips, trips, on="TripScheduleId")
_id = vtrips["TripScheduleId"].copy()
vtrips["TripScheduleId"] = vtrips["Id"]
vtrips["Id"] = _id
return vtrips
def set_route_gtfs(self, vroutes, routes):
"""Create Route2Gtfs."""
vroutes["GtfsId"] = self.gtfs_id
routes = routes[["ShortName", "LongName", "RouteScheduleId"]]
vroutes = pd.merge(vroutes, routes, on="RouteScheduleId")
_id = vroutes["RouteScheduleId"].copy()
vroutes["RouteScheduleId"] = vroutes["Id"]
vroutes["Id"] = _id
return vroutes
def set_stop_gtfs(self, vstops, stops):
"""Create Stop2Gtfs."""
vstops["GtfsId"] = self.gtfs_id
stops = stops[["Name", "ZoneId", "StopScheduleId"]]
vstops = | pd.merge(vstops, stops, on="StopScheduleId") | pandas.merge |
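# --- Editor's sketch: pt.hash_frame comes from the project's internal
# utilities and is not shown here. A plausible stand-in (an assumption, not
# the original implementation) hashes the row-hashes of the selected columns
# in their given order, which is all gen_new_id_on_seq relies on:
import hashlib

import pandas as pd
from pandas.util import hash_pandas_object

def hash_frame_sketch(df):
    """Return a stable sha1 for the ordered contents of a DataFrame."""
    row_hashes = hash_pandas_object(df, index=False).values
    return hashlib.sha1(row_hashes.tobytes()).hexdigest()
# -----------------------------------------------------------------------------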
import uuid
import numpy as np
import pandas as pd
from irspack.split import split_last_n_interaction_df
RNS = np.random.RandomState(0)
n_users = 1000
n_items = 512
df_size = 10000
user_ids = np.asarray([str(uuid.uuid1()) for _ in range(n_users)])
item_ids = np.asarray([str(uuid.uuid1()) for _ in range(n_items)])
df_master = pd.DataFrame(
dict(
user_id=RNS.choice(user_ids, size=df_size, replace=True),
item_id=RNS.choice(item_ids, size=df_size, replace=True),
ts=RNS.randint(0, 100, size=df_size).astype("datetime64[D]"),
)
).drop_duplicates()
def test_holdout_fixed_n() -> None:
df = df_master.copy()
df_train, df_val = split_last_n_interaction_df(df, "user_id", "ts", 2)
assert pd.concat([df_train, df_val]).merge(df).shape[0] == df_master.shape[0]
uid_val_unique = np.unique(df_val.user_id)
for uid in uid_val_unique:
interactions_in_val = df_val[df_val.user_id == uid]
if interactions_in_val.shape[0] == 0:
continue
assert interactions_in_val.shape[0] <= 2
interactions_in_train = df_train[df_train.user_id == uid]
if interactions_in_train.shape[0] == 0:
continue
assert interactions_in_train.ts.max() <= interactions_in_val.ts.min()
def test_holdout_fixed_percentage() -> None:
df = df_master.copy()
df_train, df_val = split_last_n_interaction_df(
df, "user_id", "ts", heldout_ratio=0.5
)
assert | pd.concat([df_train, df_val]) | pandas.concat |
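# --- Editor's sketch (not irspack's actual implementation) of the per-user
# "hold out the last n interactions" behaviour the tests above exercise:
# order each user's rows by timestamp and move the tail to validation.
import pandas as pd

def split_last_n_sketch(df, user_col, ts_col, n_heldout):
    ordered = df.sort_values([user_col, ts_col])
    df_val = ordered.groupby(user_col).tail(n_heldout)
    df_train = ordered.drop(df_val.index)
    return df_train, df_val
# -----------------------------------------------------------------------------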
#!/usr/bin/env python3
"""
Correct bed file to be compatible with bedToBigBed tool.
1.) restrict all scores to the maximal value of 1000,
2.) in strand column replace '?' with '.'.
"""
import argparse
import pandas as pd
from pandas.errors import EmptyDataError
from resolwe_runtime_utils import error
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-f", "--bed_file", help="Bed file.")
args = parser.parse_args()
try:
df = pd.read_csv(args.bed_file, delimiter="\t", header=None, dtype=str)
except EmptyDataError:
print(
error(
f"The input BED file {args.bed_file} is empty. Your analysis might "
f"have failed to identify regions of interest (peaks, junctions, etc.)."
)
)
else:
df.iloc[:, 4] = | pd.to_numeric(df.iloc[:, 4]) | pandas.to_numeric |
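# --- Editor's note: the snippet above stops right after the numeric
# conversion. A hedged sketch of the two corrections the docstring promises
# (cap scores at 1000, replace '?' strand with '.') could look like this;
# column positions follow BED conventions (4 = score, 5 = strand).
import pandas as pd

def fix_bed_frame(df):
    df = df.copy()
    df.iloc[:, 4] = pd.to_numeric(df.iloc[:, 4]).clip(upper=1000)
    if df.shape[1] > 5:
        df.iloc[:, 5] = df.iloc[:, 5].replace("?", ".")
    return df
# -----------------------------------------------------------------------------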
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
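# --- Editor's illustration (standalone, not part of the test file above) of
# the time-of-day selection that test_at_time exercises: .at_time() keeps only
# the rows whose timestamps fall exactly at the given wall-clock time.
import numpy as np
import pandas as pd

_rng = pd.date_range("2000-01-01", periods=48, freq="30min")
_ts = pd.Series(np.arange(48), index=_rng)
assert (_ts.at_time("09:30") == _ts[(_rng.hour == 9) & (_rng.minute == 30)]).all()
# -----------------------------------------------------------------------------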
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from application import model_builder
def test_validate_types_numeric_success():
# Arrange
df = | pd.DataFrame() | pandas.DataFrame |