prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars)
---|---|---|
import os
import random
import argparse
import pyexr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import struct
import torch
import torchvision
from torch import Tensor
from torch.utils.data import Dataset
from tqdm import tqdm
# small epsilon to avoid nan
SMALL_EPSILON = 1e-6
GAMMA = 2.2
INV_GAMMA = 1 / GAMMA
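# Illustrative sketch (not part of the original file): GAMMA/INV_GAMMA implement a plain
# gamma encode; the dataset classes below apply it to HDR radiance tensors and then clamp
# to [0, 1]. The helper name and the example usage are assumptions for demonstration only.
def _demo_gamma_encode(hdr_tensor: Tensor) -> Tensor:
    # e.g. hdr_tensor = torch.rand(3, 1016, 1920) * 10.0  (unbounded radiance)
    return torch.clamp(torch.pow(hdr_tensor, INV_GAMMA), 0, 1)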
def random_crop_tensor(input, crop_size):
assert(len(input.shape) == 3)
random.seed()
_, h, w = input.shape
crop_h = max(0, h - crop_size)
crop_w = max(0, w - crop_size)
random_anchor = [int(random.random() * crop_h), int(random.random() * crop_w)]
return input[:, random_anchor[0] : min(random_anchor[0] + crop_size, h), random_anchor[1] : min(random_anchor[1] + crop_size, w)]
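# Illustrative sketch (assumption, not from the original file): how random_crop_tensor
# behaves on a dummy CHW tensor; the 3x1080x1920 shape is made up for demonstration.
def _demo_random_crop():
    demo_img = torch.rand(3, 1080, 1920)
    crop = random_crop_tensor(demo_img, 256)  # random 256x256 spatial window
    assert crop.shape == (3, 256, 256)
    return crop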
class SupersampleDataset(Dataset):
def __init__(self, src_folder: str, data_types_to_fetch: list, crop_size=256):
csv_path = os.path.join(src_folder, "data.csv")
if not os.path.exists(csv_path):
build_dataset_csv(src_folder)
self.src_folder = src_folder
self.fh_frame = pd.read_csv(csv_path)
self.data_types_to_fetch = data_types_to_fetch
self.crop_size = crop_size
def __len__(self):
return len(self.fh_frame)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = {}
# mat_diffuse", "mat_ref", "mat_spec_rough", "world_normal", "world_pos
for i, fh in enumerate(self.fh_frame.values[idx]):
data_type = self.fh_frame.columns[i]
if data_type in self.data_types_to_fetch:
img_path = os.path.join(self.src_folder, fh)
if data_type in ["half", "full", "clean"]:
# min_max_arr = np.load(os.path.join(self.src_folder, '{}_min_max.npy'.format(data_type)))
# img_np = torch.load(img_path)[:, :1016, :].numpy()
# img_np = np.transpose(img_np, axes=(2, 1, 0)) - min_max_arr[0]
# img_np = img_np / (min_max_arr[1] - min_max_arr[0])
# img_tensor = torch.tensor(np.transpose(img_np, axes=(2, 1, 0)))
image = torch.load(img_path)[:, :1016, :]
image = torch.pow(image, INV_GAMMA)
image = torch.clamp(image, 0, 1)
elif data_type in ["mat_ref", "mat_spec_rough"]:
image = torch.unsqueeze(torch.load(img_path)[0, :1016, :], 0)
else:
image = torch.load(img_path)[:, :1016, :]
# image = random_crop_tensor(image, self.crop_size)
sample[data_type] = image
return sample
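# Illustrative sketch (assumption, not from the original file): typical consumption of the
# dataset above. The folder path, fetched channels and batch size are placeholders.
def _demo_supersample_loader(folder="data/train"):
    from torch.utils.data import DataLoader
    ds = SupersampleDataset(folder, data_types_to_fetch=["half", "full"])
    loader = DataLoader(ds, batch_size=4, shuffle=True)
    batch = next(iter(loader))  # dict of batched tensors keyed by data type
    return batch["half"].shape, batch["full"].shape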
class DenoiseDataset(Dataset):
def __init__(self, src_folder: str, crop_size=256, gamma_trans=True):
csv_path = os.path.join(src_folder, "data.csv")
if not os.path.exists(csv_path):
build_dataset_csv(src_folder)
self.src_folder = src_folder
self.fh_frame = pd.read_csv(csv_path)
self.data_types_to_fetch = ["full", "mat_diffuse", "mat_ref", "mat_spec_rough", "world_normal", "world_pos", "clean"]
self.crop_size = crop_size
self.gamma_trans = gamma_trans
def __len__(self):
return len(self.fh_frame)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = {}
for i, fh in enumerate(self.fh_frame.values[idx]):
data_type = self.fh_frame.columns[i]
if data_type in self.data_types_to_fetch:
img_path = os.path.join(self.src_folder, fh)
if data_type in ["full", "clean"]:
if self.gamma_trans:
image = torch.pow(torch.load(img_path)[:, :1016, :], INV_GAMMA)
image = torch.clamp(torch.load(img_path)[:, :1016, :], 0, 1)
elif data_type in ["mat_ref", "mat_spec_rough"]:
image = torch.unsqueeze(torch.load(img_path)[0, :1016, :], 0)
else:
image = torch.load(img_path)[:, :1016, :]
# image = random_crop_tensor(image, self.crop_size)
sample[data_type] = image
return sample
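# Illustrative sketch (assumption, not from the original file): a denoising network would
# typically concatenate the noisy radiance with the auxiliary g-buffers along the channel
# axis; "clean" is kept separate as the training target.
def _demo_stack_denoise_sample(sample):
    aux = ["mat_diffuse", "mat_ref", "mat_spec_rough", "world_normal", "world_pos"]
    net_input = torch.cat([sample["full"]] + [sample[k] for k in aux], dim=0)
    target = sample["clean"]
    return net_input, target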
def build_dataset_csv(src_folder: str):
src_file_denoise_format = "Clean"
src_file_gbuff_format = ["MaterialDiffuse", "MaterialIoR",
"MaterialSpecRough", "WorldNormal", "WorldPosition"]
src_file_rt_full_1spp = "Full"
src_file_rt_half_0_5spp = "Half"
src_file_rt_full_4spp = "4spp"
data = {'clean': {}, 'full': {}, 'half': {}, 'mat_diffuse': {}, 'mat_ref': {},
'mat_spec_rough': {}, "world_normal": {}, "world_pos": {}, "4spp": {}}
files = os.listdir(src_folder)
for file in files:
file_type = file.split('-')[0]
idx = int(file.split('-')[-1].split('.')[0])
if file_type == src_file_denoise_format:
data['clean'][idx] = file
elif file_type == src_file_rt_full_1spp:
data['full'][idx] = file
elif file_type == src_file_rt_half_0_5spp:
data['half'][idx] = file
elif file_type == src_file_gbuff_format[0]:
data['mat_diffuse'][idx] = file
elif file_type == src_file_gbuff_format[1]:
data['mat_ref'][idx] = file
elif file_type == src_file_gbuff_format[2]:
data['mat_spec_rough'][idx] = file
elif file_type == src_file_gbuff_format[3]:
data['world_normal'][idx] = file
elif file_type == src_file_gbuff_format[4]:
data['world_pos'][idx] = file
elif file_type == src_file_rt_full_4spp:
data['4spp'][idx] = file
else:
raise NotImplementedError
keys_to_remove = []
for key, value in data.items():
idx = list(value.keys())
fh = list(value.values())
if len(idx) > 0:
zipped_lists = zip(idx, fh)
sorted_zipped_lists = sorted(zipped_lists)
sorted_list = [handle for _, handle in sorted_zipped_lists]
data[key] = sorted_list
else:
keys_to_remove.append(key)
for key in keys_to_remove:
data.pop(key)
df = | pd.DataFrame(data=data) | pandas.DataFrame |
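# Illustrative sketch (assumption, not from the original file): the on-disk naming
# convention build_dataset_csv parses -- "<Type>-<index>.<ext>", where the prefix selects
# the buffer type and the trailing integer is the frame index (the extension is assumed).
def _demo_expected_file_layout():
    return ["Clean-0.pt", "Full-0.pt", "Half-0.pt", "4spp-0.pt", "MaterialDiffuse-0.pt",
            "MaterialIoR-0.pt", "MaterialSpecRough-0.pt", "WorldNormal-0.pt",
            "WorldPosition-0.pt"]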
#
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import matplotlib.pyplot as plt
import pandas as pd
from scikitplot.estimators import plot_learning_curve
from scikitplot.metrics import plot_precision_recall
from sklearn.base import is_regressor, is_classifier
from sklearn.cluster import KMeans
from sklearn.metrics import explained_variance_score, max_error, mean_absolute_error, r2_score, \
precision_recall_fscore_support
from yellowbrick.classifier import ClassificationReport, ConfusionMatrix, ROCAUC, ClassPredictionError
from yellowbrick.cluster import SilhouetteVisualizer, KElbowVisualizer
from yellowbrick.model_selection import FeatureImportances
from yellowbrick.regressor import ResidualsPlot, PredictionError, CooksDistance
import neptune.new as neptune
def create_regressor_summary(regressor, X_train, X_test, y_train, y_test, nrows=1000, log_charts=True):
"""Create sklearn regressor summary.
This method creates a regressor summary that includes:
* all regressor parameters,
* pickled estimator (model),
* test predictions,
* test scores,
* model performance visualizations.
Returned ``dict`` can be assigned to the run's namespace defined by the user (see example below).
Regressor should be fitted before calling this function.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The regression target for training
y_test (:obj:`ndarray`):
| The regression target for testing
nrows (`int`, optional, default is 1000):
| Log first ``nrows`` rows of test predictions.
log_charts (:bool:, optional, default is ``True``):
| If ``True``, calculate and log chart visualizations.
|
| NOTE: calculating visualizations is potentially expensive depending on input data and regressor, and
| may take some time to finish.
|
| This is equivalent to calling ``log_learning_curve_chart``, ``log_feature_importance_chart``,
| ``log_residuals_chart``, ``log_prediction_error_chart``, ``log_cooks_distance_chart``
| functions from this module.
Returns:
``dict`` with all summary items.
Examples:
Log random forest regressor summary.
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['random_forest/summary'] = npt_utils.create_regressor_summary(rfr, X_train, X_test, y_train, y_test)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
reg_summary = dict()
reg_summary['all_params'] = get_estimator_params(regressor)
reg_summary['pickled_model'] = get_pickled_model(regressor)
y_pred = regressor.predict(X_test)
reg_summary['test'] = {'preds': get_test_preds(regressor, X_test, y_test, y_pred=y_pred, nrows=nrows),
'scores': get_scores(regressor, X_test, y_test, y_pred=y_pred)}
if log_charts:
reg_summary['diagnostics_charts'] = {
'learning_curve': create_learning_curve_chart(regressor, X_train, y_train),
'feature_importance': create_feature_importance_chart(regressor, X_train, y_train),
'residuals': create_residuals_chart(regressor, X_train, X_test, y_train, y_test),
'prediction_error': create_prediction_error_chart(regressor, X_train, X_test, y_train, y_test),
'cooks_distance': create_cooks_distance_chart(regressor, X_train, y_train)}
return reg_summary
def create_classifier_summary(classifier, X_train, X_test, y_train, y_test, nrows=1000, log_charts=True):
"""Create sklearn classifier summary.
This method creates a classifier summary that includes:
* all classifier parameters,
* pickled estimator (model),
* test predictions,
* test predictions probabilities,
* test scores,
* model performance visualizations.
Returned ``dict`` can be assigned to the run's namespace defined by the user (see example below).
Classifier should be fitted before calling this function.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
nrows (`int`, optional, default is 1000):
| Log first ``nrows`` rows of test predictions and predictions probabilities.
log_charts (:bool:, optional, default is ``True``):
| If True, calculate and send chart visualizations.
|
| NOTE: calculating visualizations is potentially expensive depending on input data and classifier, and
| may take some time to finish.
|
| This is equivalent to calling ``log_classification_report_chart``, ``log_confusion_matrix_chart``,
| ``log_roc_auc_chart``, ``log_precision_recall_chart``, ``log_class_prediction_error_chart``
| functions from this module.
Returns:
``dict`` with all summary items.
Examples:
Log random forest classifier summary.
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['random_forest/summary'] = npt_utils.create_classifier_summary(rfc, X_train, X_test, y_train, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
cls_summary = dict()
cls_summary['all_params'] = get_estimator_params(classifier)
cls_summary['pickled_model'] = get_pickled_model(classifier)
y_pred = classifier.predict(X_test)
cls_summary['test'] = {'preds': get_test_preds(classifier, X_test, y_test, y_pred=y_pred, nrows=nrows),
'preds_proba': get_test_preds_proba(classifier, X_test, nrows=nrows),
'scores': get_scores(classifier, X_test, y_test, y_pred=y_pred)}
if log_charts:
cls_summary['diagnostics_charts'] = {
'classification_report': create_classification_report_chart(classifier, X_train, X_test, y_train, y_test),
'confusion_matrix': create_confusion_matrix_chart(classifier, X_train, X_test, y_train, y_test),
'ROC_AUC': create_roc_auc_chart(classifier, X_train, X_test, y_train, y_test),
'precision_recall': create_precision_recall_chart(classifier, X_test, y_test),
'class_prediction_error': create_class_prediction_error_chart(classifier, X_train, X_test, y_train, y_test)}
return cls_summary
def create_kmeans_summary(model, X, nrows=1000, **kwargs):
"""Create sklearn kmeans summary.
This method fits a KMeans model to the data and logs:
* all kmeans parameters,
* cluster labels,
* clustering visualizations: KMeans elbow chart and silhouette coefficients chart.
Returned ``dict`` can be assigned to the run's namespace defined by the user (see example below).
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
model (:obj:`KMeans`):
| KMeans object.
X (:obj:`ndarray`):
| Training instances to cluster.
nrows (`int`, optional, default is 1000):
| Number of rows to log in the cluster labels.
kwargs:
KMeans parameters.
Returns:
``dict`` with all summary items.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
km = KMeans(n_init=11, max_iter=270)
X, y = make_blobs(n_samples=579, n_features=17, centers=7, random_state=28743)
run = neptune.init(project='my_workspace/my_project')
run['kmeans/summary'] = npt_utils.create_kmeans_summary(km, X)
"""
assert isinstance(model, KMeans), 'model should be sklearn KMeans instance'
kmeans_summary = dict()
model.set_params(**kwargs)
kmeans_summary['all_params'] = get_estimator_params(model)
kmeans_summary['cluster_labels'] = get_cluster_labels(model, X, nrows=nrows, **kwargs)
kmeans_summary['diagnostics_charts'] = {
'kelbow': create_kelbow_chart(model, X, **kwargs),
'silhouette': create_silhouette_chart(model, X, **kwargs)}
return kmeans_summary
def get_estimator_params(estimator):
"""Get estimator parameters.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator from which to log parameters.
Returns:
``dict`` with all parameters mapped to their values.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
run = neptune.init(project='my_workspace/my_project')
run['estimator/params'] = npt_utils.get_estimator_params(rfr)
"""
assert is_regressor(estimator) or is_classifier(estimator) or isinstance(estimator, KMeans),\
'Estimator should be sklearn regressor, classifier or kmeans clusterer.'
return estimator.get_params()
def get_pickled_model(estimator):
"""Get pickled estimator.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator to pickle.
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
run = neptune.init(project='my_workspace/my_project')
run['estimator/pickled_model'] = npt_utils.get_pickled_model(rfr)
"""
assert is_regressor(estimator) or is_classifier(estimator),\
'Estimator should be sklearn regressor or classifier.'
return neptune.types.File.as_pickle(estimator)
def get_test_preds(estimator, X_test, y_test, y_pred=None, nrows=1000):
"""Get test predictions.
If you pass ``y_pred``, then predictions are not computed from ``X_test`` data.
Estimator should be fitted before calling this function.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator to compute predictions.
X_test (:obj:`ndarray`):
| Testing data matrix.
y_test (:obj:`ndarray`):
| Target for testing.
y_pred (:obj:`ndarray`, optional, default is ``None``):
| Estimator predictions on test data.
nrows (`int`, optional, default is 1000):
| Number of rows to log.
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
run = neptune.init(project='my_workspace/my_project')
run['estimator/test_preds'] = npt_utils.get_test_preds(rfr, X_test, y_test)
"""
assert is_regressor(estimator) or is_classifier(estimator),\
'Estimator should be sklearn regressor or classifier.'
assert isinstance(nrows, int), 'nrows should be integer, {} was passed'.format(type(nrows))
preds = None
if y_pred is None:
y_pred = estimator.predict(X_test)
# single output
if len(y_pred.shape) == 1:
df = pd.DataFrame(data={'y_true': y_test, 'y_pred': y_pred})
df = df.head(n=nrows)
preds = neptune.types.File.as_html(df)
# multi output
if len(y_pred.shape) == 2:
df = pd.DataFrame()
for j in range(y_pred.shape[1]):
df['y_test_output_{}'.format(j)] = y_test[:, j]
df['y_pred_output_{}'.format(j)] = y_pred[:, j]
df = df.head(n=nrows)
preds = neptune.types.File.as_html(df)
return preds
def get_test_preds_proba(classifier, X_test=None, y_pred_proba=None, nrows=1000):
"""Get test predictions probabilities.
If you pass ``X_test``, prediction probabilities are computed from that data.
If you pass ``y_pred_proba``, the provided probabilities are used directly and nothing is recomputed from ``X_test``.
Estimator should be fitted before calling this function.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
classifier (:obj:`classifier`):
| Scikit-learn classifier to compute predictions probabilities.
X_test (:obj:`ndarray`):
| Testing data matrix.
y_pred_proba (:obj:`ndarray`, optional, default is ``None``):
| Classifier predictions probabilities on test data.
nrows (`int`, optional, default is 1000):
| Number of rows to log.
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['estimator/test_preds_proba'] = npt_utils.get_test_preds_proba(rfc, X_test)
"""
assert is_classifier(classifier), 'Classifier should be sklearn classifier.'
assert isinstance(nrows, int), 'nrows should be integer, {} was passed'.format(type(nrows))
if X_test is not None and y_pred_proba is not None:
raise ValueError('X_test and y_pred_proba are mutually exclusive')
if X_test is None and y_pred_proba is None:
raise ValueError('X_test or y_pred_proba is required')
if y_pred_proba is None:
try:
y_pred_proba = classifier.predict_proba(X_test)
except Exception as e:
print('This classifier does not provide prediction probabilities. Error: {}'.format(e))
return
df = pd.DataFrame(data=y_pred_proba, columns=classifier.classes_)
df = df.head(n=nrows)
return neptune.types.File.as_html(df)
def get_scores(estimator, X, y, y_pred=None):
"""Get estimator scores on ``X``.
If you pass ``y_pred``, then predictions are not computed from ``X`` and ``y`` data.
Estimator should be fitted before calling this function.
**Regressor**
For regressors that output a single value, the following scores are logged:
* explained variance
* max error
* mean absolute error
* r2
For multi-output regressor:
* r2
**Classifier**
For classifiers, the following scores are logged:
* precision
* recall
* f beta score
* support
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
estimator (:obj:`estimator`):
| Scikit-learn estimator to compute scores.
X (:obj:`ndarray`):
| Data matrix.
y (:obj:`ndarray`):
| Target for testing.
y_pred (:obj:`ndarray`, optional, default is ``None``):
| Estimator predictions on data.
Returns:
``dict`` with scores.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['estimator/scores'] = npt_utils.get_scores(rfc, X, y)
"""
assert is_regressor(estimator) or is_classifier(estimator),\
'Estimator should be sklearn regressor or classifier.'
scores_dict = {}
if y_pred is None:
y_pred = estimator.predict(X)
if is_regressor(estimator):
# single output
if len(y_pred.shape) == 1:
evs = explained_variance_score(y, y_pred)
me = max_error(y, y_pred)
mae = mean_absolute_error(y, y_pred)
r2 = r2_score(y, y_pred)
scores_dict['explained_variance_score'] = evs
scores_dict['max_error'] = me
scores_dict['mean_absolute_error'] = mae
scores_dict['r2_score'] = r2
# multi output
if len(y_pred.shape) == 2:
r2 = estimator.score(X, y)
scores_dict['r2_score'] = r2
elif is_classifier(estimator):
precision, recall, fbeta_score, support = precision_recall_fscore_support(y, y_pred)
for i, value in enumerate(precision):
scores_dict['class_{}'.format(i)] = {'precision': value,
'recall': recall[i],
'fbeta_score': fbeta_score[i],
'support': support[i]}
return scores_dict
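# Illustrative sketch (assumption, not from the original module): the shape of the dict
# returned by get_scores for a binary classifier -- one sub-dict per class. All numbers
# below are made up for demonstration only.
def _demo_classifier_scores_shape():
    return {
        'class_0': {'precision': 0.91, 'recall': 0.88, 'fbeta_score': 0.89, 'support': 52},
        'class_1': {'precision': 0.84, 'recall': 0.87, 'fbeta_score': 0.85, 'support': 48},
    }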
def create_learning_curve_chart(regressor, X_train, y_train):
"""Create learning curve chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
y_train (:obj:`ndarray`):
| The regression target for training
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/learning_curve'] = npt_utils.create_learning_curve_chart(rfr, X_train, y_train)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
chart = None
try:
fig, ax = plt.subplots()
plot_learning_curve(regressor, X_train, y_train, ax=ax)
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log learning curve chart. Error: {}'.format(e))
return chart
def create_feature_importance_chart(regressor, X_train, y_train):
"""Create feature importance chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
y_train (:obj:`ndarray`):
| The regression target for training
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/feature_importance'] = npt_utils.create_feature_importance_chart(rfr, X_train, y_train)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
chart = None
try:
fig, ax = plt.subplots()
visualizer = FeatureImportances(regressor, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.finalize()
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log feature importance chart. Error: {}'.format(e))
return chart
def create_residuals_chart(regressor, X_train, X_test, y_train, y_test):
"""Create residuals chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The regression target for training
y_test (:obj:`ndarray`):
| The regression target for testing
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/residuals'] = npt_utils.create_residuals_chart(rfr, X_train, X_test, y_train, y_test)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
chart = None
try:
fig, ax = plt.subplots()
visualizer = ResidualsPlot(regressor, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log residuals chart. Error: {}'.format(e))
return chart
def create_prediction_error_chart(regressor, X_train, X_test, y_train, y_test):
"""Create prediction error chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The regression target for training
y_test (:obj:`ndarray`):
| The regression target for testing
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['prediction_error'] = npt_utils.create_prediction_error_chart(rfr, X_train, X_test, y_train, y_test)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
chart = None
try:
fig, ax = plt.subplots()
visualizer = PredictionError(regressor, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log prediction error chart. Error: {}'.format(e))
return chart
def create_cooks_distance_chart(regressor, X_train, y_train):
"""Create cooks distance chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
regressor (:obj:`regressor`):
| Fitted sklearn regressor object
X_train (:obj:`ndarray`):
| Training data matrix
y_train (:obj:`ndarray`):
| The regression target for training
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfr = RandomForestRegressor()
rfr.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/cooks_distance'] = npt_utils.create_cooks_distance_chart(rfr, X_train, y_train)
"""
assert is_regressor(regressor), 'regressor should be sklearn regressor.'
chart = None
try:
fig, ax = plt.subplots()
visualizer = CooksDistance(ax=ax)
visualizer.fit(X_train, y_train)
visualizer.finalize()
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log cooks distance chart. Error: {}'.format(e))
return chart
def create_classification_report_chart(classifier, X_train, X_test, y_train, y_test):
"""Create classification report chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/classification_report'] = \
npt_utils.create_classification_report_chart(rfc, X_train, X_test, y_train, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
chart = None
try:
fig, ax = plt.subplots()
visualizer = ClassificationReport(classifier, support=True, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log Classification Report chart. Error: {}'.format(e))
return chart
def create_confusion_matrix_chart(classifier, X_train, X_test, y_train, y_test):
"""Create confusion matrix.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/confusion_matrix'] = \
npt_utils.create_confusion_matrix_chart(rfc, X_train, X_test, y_train, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
chart = None
try:
fig, ax = plt.subplots()
visualizer = ConfusionMatrix(classifier, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log Confusion Matrix chart. Error: {}'.format(e))
return chart
def create_roc_auc_chart(classifier, X_train, X_test, y_train, y_test):
"""Create ROC-AUC chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/roc_auc'] = npt_utils.create_roc_auc_chart(rfc, X_train, X_test, y_train, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
chart = None
try:
fig, ax = plt.subplots()
visualizer = ROCAUC(classifier, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log ROC-AUC chart. Error {}'.format(e))
return chart
def create_precision_recall_chart(classifier, X_test, y_test, y_pred_proba=None):
"""Create precision recall chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_test (:obj:`ndarray`):
| Testing data matrix
y_test (:obj:`ndarray`):
| The classification target for testing
y_pred_proba (:obj:`ndarray`, optional, default is ``None``):
| Classifier predictions probabilities on test data.
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/precision_recall'] = npt_utils.create_precision_recall_chart(rfc, X_test, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
chart = None
if y_pred_proba is None:
try:
y_pred_proba = classifier.predict_proba(X_test)
except Exception as e:
print('Did not log Precision-Recall chart: this classifier does not provide prediction probabilities. '
      'Error: {}'.format(e))
return chart
try:
fig, ax = plt.subplots()
plot_precision_recall(y_test, y_pred_proba, ax=ax)
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log Precision-Recall chart. Error {}'.format(e))
return chart
def create_class_prediction_error_chart(classifier, X_train, X_test, y_train, y_test):
"""Create class prediction error chart.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
classifier (:obj:`classifier`):
| Fitted sklearn classifier object
X_train (:obj:`ndarray`):
| Training data matrix
X_test (:obj:`ndarray`):
| Testing data matrix
y_train (:obj:`ndarray`):
| The classification target for training
y_test (:obj:`ndarray`):
| The classification target for testing
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
run = neptune.init(project='my_workspace/my_project')
run['visuals/class_prediction_error'] = \
npt_utils.create_class_prediction_error_chart(rfc, X_train, X_test, y_train, y_test)
"""
assert is_classifier(classifier), 'classifier should be sklearn classifier.'
chart = None
try:
fig, ax = plt.subplots()
visualizer = ClassPredictionError(classifier, is_fitted=True, ax=ax)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.finalize()
chart = neptune.types.File.as_image(fig)
plt.close(fig)
except Exception as e:
print('Did not log Class Prediction Error chart. Error {}'.format(e))
return chart
def get_cluster_labels(model, X, nrows=1000, **kwargs):
"""Log index of the cluster label each sample belongs to.
Tip:
Check Sklearn-Neptune integration
`documentation <https://docs-beta.neptune.ai/essentials/integrations/machine-learning-frameworks/sklearn>`_
for the full example.
Args:
model (:obj:`KMeans`):
| KMeans object.
X (:obj:`ndarray`):
| Training instances to cluster.
nrows (`int`, optional, default is 1000):
| Number of rows to log.
kwargs:
KMeans parameters.
Returns:
``neptune.types.File`` object that you can assign to run's ``base_namespace``.
Examples:
.. code:: python3
import neptune.new.integrations.sklearn as npt_utils
km = KMeans(n_init=11, max_iter=270)
X, y = make_blobs(n_samples=579, n_features=17, centers=7, random_state=28743)
run = neptune.init(project='my_workspace/my_project')
run['kmeans/cluster_labels'] = npt_utils.get_cluster_labels(km, X)
"""
assert isinstance(model, KMeans), 'Model should be sklearn KMeans instance.'
assert isinstance(nrows, int), 'nrows should be integer, {} was passed'.format(type(nrows))
model.set_params(**kwargs)
labels = model.fit_predict(X)
df = | pd.DataFrame(data={'cluster_labels': labels}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 13:32:51 2022
@author: mostafh
"""
from atomic.analytic.acwrapper import ACWrapper
import json
import pandas as pd
import numpy as np
class TEDWrapper(ACWrapper):
def __init__(self, agent_name, world=None, **kwargs):
super().__init__(agent_name, world, **kwargs)
self.score_names = [
"process_coverage",
"process_coverage_agg",
"inaction_stand_s",
"action_triage_s",
"triage_count",
"action_dig_rubble_s",
"dig_rubble_count",
"action_move_victim_s",
"move_victim_count",
"action_explore_s",
"explore_count",
"process_triaging_agg",
"team_score",
"team_score_agg",
"comms_total_words",
"comms_equity",
"process_skill_use_s",
"process_effort_s",
"process_skill_use_rel",
"process_workload_burnt",
"process_skill_use_agg",
"process_effort_agg",
"process_workload_burnt_agg"]
self.topic_handlers = {
'trial': self.handle_trial,
'agent/ac/ac_cmu_ta2_ted/ted': self.handle_msg}
self.data = pd.DataFrame()
# self.data = pd.DataFrame(columns=['millis'] + self.score_names)
def handle_msg(self, message, data, mission_time):
new_data = [data]
self.last = | pd.DataFrame(new_data) | pandas.DataFrame |
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import pytest
from simplesqlite import connect_memdb
from ._common import print_test_result
try:
import pandas as pd
PANDAS_IMPORT = True
except ImportError:
PANDAS_IMPORT = False
@pytest.mark.skipif(not PANDAS_IMPORT, reason="required package not found")
class Test_fromto_pandas_dataframe:
def test_normal(self):
con = connect_memdb()
dataframe = pd.DataFrame(
[[0, 0.1, "a"], [1, 1.1, "bb"], [2, 2.2, "ccc"]], columns=["id", "value", "name"]
)
table_name = "tablename"
con.create_table_from_dataframe(dataframe, table_name)
actual_all = con.select_as_dataframe(table_name=table_name)
print_test_result(expected=dataframe, actual=actual_all)
assert actual_all.equals(dataframe)
select_columns = ["value", "name"]
actual_part = con.select_as_dataframe(table_name=table_name, columns=select_columns)
assert actual_part.equals(
pd.DataFrame([[0.1, "a"], [1.1, "bb"], [2.2, "ccc"]], columns=select_columns)
)
def test_normal_include_datetime(self):
con = connect_memdb()
table_name = "table_w_datetime"
dataframe = pd.DataFrame(
[
["2020-03-25 15:24:00-04:00", 0, 0.1, "a"],
["2020-03-25 15:25:00-04:00", 1, 1.1, "bb"],
["2020-03-25 15:30:00-04:00", 2, 2.2, "ccc"],
],
columns=["timestamp", "id", "value", "name"],
)
dataframe["timestamp"] = | pd.to_datetime(dataframe["timestamp"]) | pandas.to_datetime |
from fastapi import FastAPI, APIRouter, HTTPException
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
from fastapi_utils.tasks import repeat_every
from app.api import predict, viz, getdata
from pydantic import BaseModel, Field, validator
import pandas as pd
import praw
import os
import requests
from bs4 import BeautifulSoup
import re
import pickle
from newspaper import Article
import spacy
from collections import Counter
from datetime import datetime
from dotenv import load_dotenv
# set up various things to be loaded outside of the function
# geolocation data
locs_path = os.path.join(os.path.dirname(
__file__), '..', 'cities_states.csv')
locs_df = pd.read_csv(locs_path)
def lowerify(text):
# fix up geolocation dataframe a little
return text.lower()
locs_df = locs_df.drop(columns=['Unnamed: 0', 'country'])
locs_df['city_ascii'] = locs_df['city_ascii'].apply(lowerify)
locs_df['admin_name'] = locs_df['admin_name'].apply(lowerify)
states_map = {}
# for each state, map their respective cities
for state in list(locs_df.admin_name.unique()):
states_map[state] = locs_df[locs_df['admin_name']
== state]['city_ascii'].to_list()
# police-brutality-identifying NLP model
model_path = os.path.join(os.path.dirname(
__file__), '..', 'model.pkl')
model_file = open(model_path, 'rb')
pipeline = pickle.load(model_file)
model_file.close()
# local csv backlog path
backlog_path = os.path.join(os.path.dirname(
__file__), '..', 'backlog.csv'
)
# spacy nlp model
nlp = spacy.load('en_core_web_sm')
load_dotenv()
app = FastAPI(
title='Human Rights First Data Science API',
description='Returns posts from Reddit\'s r/news subreddit on police brutality',
version='0.5',
docs_url='/',
)
app.include_router(predict.router)
app.include_router(viz.router)
app.include_router(getdata.router)
@app.on_event('startup')
@repeat_every(seconds=60*60*24) # 24 hours
def run_update() -> None:
'''
Update backlog database with data from reddit.
'''
print('Updating backlog at %s' % datetime.now())
PRAW_CLIENT_ID = os.getenv('PRAW_CLIENT_ID')
PRAW_CLIENT_SECRET = os.getenv('PRAW_CLIENT_SECRET')
PRAW_USER_AGENT = os.getenv('PRAW_USER_AGENT')
reddit = praw.Reddit(
client_id=PRAW_CLIENT_ID,
client_secret=PRAW_CLIENT_SECRET,
user_agent=PRAW_USER_AGENT
)
# Grab data from reddit
data = []
for submission in reddit.subreddit("news").top('week', limit=500):
data.append([
submission.id, submission.title, submission.url
])
# construct a dataframe with the data
col_names = ['id', 'title', 'url']
df = pd.DataFrame(data, columns=col_names)
# pull the text from each article itself using newspaper3k
content_list = []
date_list = []
# go through each URL and use newspaper3k to extract data
for id_url in df['url']:
# use newspaper3k to extract text
article = Article(id_url)
article.download()
# if the article doesn't download, the error is thrown in parse()
try:
article.parse()
except:
# add null values to show no connection
content_list.append(None)
date_list.append(None)
continue
content_list.append(article.text)
# this will be null if newspaper3k can't find it
date_list.append(article.publish_date)
df['text'] = content_list
df['date'] = date_list
# drop any articles with missing data columns
df = df.dropna()
df = df.reset_index()
df = df.drop(columns='index')
# convert date column to pandas Timestamps
def timestampify(date):
return | pd.Timestamp(date, unit='s') | pandas.Timestamp |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pandas as pd
# File called _pytest for PyCharm compatibility
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from tests.common import TestData, assert_almost_equal
class TestDataFrameMetrics(TestData):
funcs = ["max", "min", "mean", "sum"]
extended_funcs = ["median", "mad", "var", "std"]
filter_data = [
"AvgTicketPrice",
"Cancelled",
"dayOfWeek",
"timestamp",
"DestCountry",
]
@pytest.mark.parametrize("numeric_only", [False, None])
def test_flights_metrics(self, numeric_only):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
for func in self.funcs:
# Pandas v1.0 doesn't support mean() on datetime
# Pandas and Eland don't support sum() on datetime
if not numeric_only:
dtype_include = (
[np.number, np.datetime64]
if func not in ("mean", "sum")
else [np.number]
)
pd_flights = pd_flights.select_dtypes(include=dtype_include)
ed_flights = ed_flights.select_dtypes(include=dtype_include)
pd_metric = getattr(pd_flights, func)(numeric_only=numeric_only)
ed_metric = getattr(ed_flights, func)(numeric_only=numeric_only)
assert_series_equal(pd_metric, ed_metric, check_dtype=False)
def test_flights_extended_metrics(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
# Test on reduced set of data for more consistent
# median behaviour + better var, std test for sample vs population
pd_flights = pd_flights[["AvgTicketPrice"]]
ed_flights = ed_flights[["AvgTicketPrice"]]
import logging
logger = logging.getLogger("elasticsearch")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
for func in self.extended_funcs:
pd_metric = getattr(pd_flights, func)(
**({"numeric_only": True} if func != "mad" else {})
)
ed_metric = getattr(ed_flights, func)(numeric_only=True)
pd_value = pd_metric["AvgTicketPrice"]
ed_value = ed_metric["AvgTicketPrice"]
assert (ed_value * 0.9) <= pd_value <= (ed_value * 1.1) # +/-10%
def test_flights_extended_metrics_nan(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
# Test on single row to test NaN behaviour of sample std/variance
pd_flights_1 = pd_flights[pd_flights.FlightNum == "9HY9SWR"][["AvgTicketPrice"]]
ed_flights_1 = ed_flights[ed_flights.FlightNum == "9HY9SWR"][["AvgTicketPrice"]]
for func in self.extended_funcs:
pd_metric = getattr(pd_flights_1, func)()
ed_metric = getattr(ed_flights_1, func)(numeric_only=False)
assert_series_equal(pd_metric, ed_metric, check_exact=False)
# Test on zero rows to test NaN behaviour of sample std/variance
pd_flights_0 = pd_flights[pd_flights.FlightNum == "XXX"][["AvgTicketPrice"]]
ed_flights_0 = ed_flights[ed_flights.FlightNum == "XXX"][["AvgTicketPrice"]]
for func in self.extended_funcs:
pd_metric = getattr(pd_flights_0, func)()
ed_metric = getattr(ed_flights_0, func)(numeric_only=False)
assert_series_equal(pd_metric, ed_metric, check_exact=False)
def test_ecommerce_selected_non_numeric_source_fields(self):
# None of these are numeric
columns = [
"category",
"currency",
"customer_birth_date",
"customer_first_name",
"user",
]
pd_ecommerce = self.pd_ecommerce()[columns]
ed_ecommerce = self.ed_ecommerce()[columns]
for func in self.funcs:
assert_series_equal(
getattr(pd_ecommerce, func)(numeric_only=True),
getattr(ed_ecommerce, func)(numeric_only=True),
check_exact=False,
)
def test_ecommerce_selected_mixed_numeric_source_fields(self):
# Some of these are numeric
columns = [
"category",
"currency",
"taxless_total_price",
"customer_birth_date",
"total_quantity",
"customer_first_name",
"user",
]
pd_ecommerce = self.pd_ecommerce()[columns]
ed_ecommerce = self.ed_ecommerce()[columns]
for func in self.funcs:
assert_series_equal(
getattr(pd_ecommerce, func)(numeric_only=True),
getattr(ed_ecommerce, func)(numeric_only=True),
check_exact=False,
)
def test_ecommerce_selected_all_numeric_source_fields(self):
# All of these are numeric
columns = ["total_quantity", "taxful_total_price", "taxless_total_price"]
pd_ecommerce = self.pd_ecommerce()[columns]
ed_ecommerce = self.ed_ecommerce()[columns]
for func in self.funcs:
assert_series_equal(
getattr(pd_ecommerce, func)(numeric_only=True),
getattr(ed_ecommerce, func)(numeric_only=True),
check_exact=False,
)
def test_flights_datetime_metrics_agg(self):
ed_timestamps = self.ed_flights()[["timestamp"]]
expected_values = {
"max": pd.Timestamp("2018-02-11 23:50:12"),
"min": pd.Timestamp("2018-01-01 00:00:00"),
"mean": pd.Timestamp("2018-01-21 19:20:45.564438232"),
"sum": pd.NaT,
"mad": pd.NaT,
"var": pd.NaT,
"std": pd.NaT,
"nunique": 12236,
}
ed_metrics = ed_timestamps.agg(
self.funcs + self.extended_funcs + ["nunique"], numeric_only=False
)
ed_metrics_dict = ed_metrics["timestamp"].to_dict()
ed_metrics_dict.pop("median") # Median is tested below.
for key, expected_value in expected_values.items():
assert_almost_equal(ed_metrics_dict[key], expected_value)
@pytest.mark.parametrize("agg", ["mean", "min", "max", "nunique"])
def test_flights_datetime_metrics_single_agg(self, agg):
ed_timestamps = self.ed_flights()[["timestamp"]]
expected_values = {
"min": pd.Timestamp("2018-01-01 00:00:00"),
"mean": pd.Timestamp("2018-01-21 19:20:45.564438232"),
"max": | pd.Timestamp("2018-02-11 23:50:12") | pandas.Timestamp |
import unittest
import pandas as pd
import numpy as np
from benchutils.metrics import l2_norm, auprc, correlation, rmse
class TestMetrics(unittest.TestCase):
def test_auprc_basic(self):
exp_profile = | pd.Series([0.5, 0.4, 0.1, 0]) | pandas.Series |
"""
This Python module allows you to synchronize a whole session worth
of physiological data to the session images, so the physiological
files will be matched to the corresponding image files and named
accordingly.
The module is file type agnostic, so it can be used for AcqKnowledge
files, Eye-link files (.edf), etc. The module relies on the calling
function extracting the timing of the onsets of the different
scanner runs. The module will then find the best time delay between
the physiological files and the imaging files.
Methods related to saving the physiological files (either as BIDS or
just compressed) are part of this module too, since the call can be
file type independent.
Based on <NAME>'s conversion.py:
https://github.com/tsalo/phys2bids/blob/eb46a71d7881c4dcd0c5e70469d88cb99bb01f1c/phys2bids/conversion.py
"""
import os
import os.path as op
import tarfile
from tempfile import TemporaryDirectory
from bids import BIDSLayout
import pandas as pd
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from bidsphysio.base.bidsphysio import PhysioData
def compress_physio(physio_file, out_prefix, get_physio_acq_time, overwrite):
"""Archives a physiological file into a tarball
Also it tries to do it reproducibly, so it takes the date for
the physio_file and targets tarball based on the acquisition time
Parameters
----------
physio_file : str
original physiological file
out_prefix : str
output path prefix, including the portion of the output file
name before .*.tgz suffix
get_physio_acq_time : function
Function to get the acquisition time of a physiological file
(e.g., read_file(file).earliest_marker_created_at, from bioread)
overwrite : bool
Overwrite existing tarfiles
Returns
-------
filename : str
Result tarball
"""
fname, physio_extension = op.splitext(physio_file)
outtar = out_prefix + physio_extension + '.tgz'
if op.exists(outtar) and not overwrite:
print("File {} already exists, will not overwrite".format(outtar))
return
# tarfile encodes current time.time inside, making those non-
# reproducible, so we should use the earliest_marker_created_at
# of the acq_file
# return physio file acquisition time as a float (like in
# the method time.time()):
acq_time = get_physio_acq_time(physio_file).timestamp()
def _assign_acq_time(ti):
# Reset the time of the TarInfo object:
ti.mtime = acq_time
return ti
# poor man mocking since can't rely on having mock
try:
import time
_old_time = time.time
time.time = lambda: acq_time
if op.lexists(outtar):
os.unlink(outtar)
with tarfile.open(outtar, 'w:gz', dereference=True) as tar:
tmpdir = TemporaryDirectory()
outfile = op.join(tmpdir.name, op.basename(physio_file))
if not op.islink(outfile):
os.symlink(op.realpath(physio_file), outfile)
# place into archive stripping any lead directories and
# adding the one corresponding to prefix
tar.add(outfile,
arcname=op.join(op.basename(out_prefix),
op.basename(outfile)),
recursive=False,
filter=_assign_acq_time)
finally:
time.time = _old_time
return outtar
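# Illustrative sketch (assumption, not from the original module): compressing an
# AcqKnowledge file, with bioread (assumed installed) supplying the acquisition time as
# suggested by the docstring above. The file path and output prefix are placeholders.
def _demo_compress_physio(acq_file='physio/run01.acq',
                          prefix='sub-01/sub-01_task-rest_physio'):
    import bioread
    get_time = lambda f: bioread.read_file(f).earliest_marker_created_at
    return compress_physio(acq_file, prefix, get_time, overwrite=True)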
def synchronize_onsets(phys_df, scan_df):
"""Find matching scans and physio trigger periods from separate DataFrames,
using time differences within each DataFrame.
There can be fewer physios than scans (task failed to trigger physio)
or fewer scans than physios (aborted scans are not retained in BIDS dataset).
Onsets are in seconds. The baseline (i.e., absolute timing) doesn't matter.
Relative timing is all that matters.
Parameters
----------
phys_df : pandas.DataFrame
DataFrame with onsets of physio trigger periods, in seconds. The
baseline does not matter, so it is reasonable for the onsets to start
with zero. The following columns are required: 'onset', 'index'.
scan_df : pandas.DataFrame
DataFrame with onsets and names of functional scans from BIDS dataset,
in seconds. The baseline does not matter, so it is reasonable for the
onsets to start with zero. The following columns are required: 'onset',
'duration'.
Returns
-------
phys_df : pandas.DataFrame
Updated scan DataFrame, now with columns for predicted physio onsets in
seconds and in indices of the physio trigger channel, as well as scan
duration in units of the physio trigger channel.
"""
phys_df = phys_df.sort_values(by=['onset'])
scan_df = scan_df.sort_values(by=['onset'])
scan_df.index = range(scan_df.shape[0]) # overwrite the run number
# Get difference between each physio trigger onset and each scan onset
onset_diffs = np.zeros((scan_df.shape[0], phys_df.shape[0]))
for i, i_scan in scan_df.iterrows():
for j, j_phys in phys_df.iterrows():
onset_diff = j_phys['onset'] - i_scan['onset']
onset_diffs[i, j] = onset_diff
# Find the delay that gives the smallest difference between scan onsets
# and physio onsets
selected = (None, None)
thresh = 1000
for i_scan in range(onset_diffs.shape[0]):
for j_phys in range(onset_diffs.shape[1]):
test_offset = onset_diffs[i_scan, j_phys]
diffs_from_phys_onset = onset_diffs - test_offset
diffs_from_abs = np.abs(diffs_from_phys_onset)
min_diff_row_idx = np.argmin(diffs_from_abs, axis=0)
min_diff_col_idx = np.arange(len(min_diff_row_idx))
min_diffs = diffs_from_abs[min_diff_row_idx, min_diff_col_idx]
min_diff_sum = np.sum(min_diffs)
if min_diff_sum < thresh:
selected = (i_scan, j_phys)
thresh = min_diff_sum
offset = onset_diffs[selected[0], selected[1]]
# Isolate close, but negative relative onsets, to ensure scan onsets are
# always before or at physio triggers.
close_thresh = 2 # threshold for "close" onsets, in seconds
diffs_from_phys_onset = onset_diffs - offset
min_diff_row_idx = np.argmin(np.abs(diffs_from_phys_onset), axis=0)
min_diff_col_idx = np.arange(len(min_diff_row_idx))
min_diffs = diffs_from_phys_onset[min_diff_row_idx, min_diff_col_idx]
min_diffs_tmp = min_diffs[abs(min_diffs) <= close_thresh]
min_val = min(min_diffs_tmp)
min_diffs += min_val
offset += min_val
print('Scan DF should be adjusted forward by {} seconds'.format(offset))
# Find the filename of the scan the 'onset' of which is close to
# the 'physio_onset' (if none is close enough, enter None):
scan_df['phys_onset'] = scan_df['onset'] + offset
scan_fnames = []
for p_on in phys_df['onset']:
corresponding_scan = scan_df.loc[
abs(scan_df['phys_onset'] - p_on) < close_thresh,
'filename'
]
if len(corresponding_scan) == 0:
scan_fnames.append(None)
else:
# append the scan filename
scan_fnames.append(corresponding_scan.iloc[0])
# Add the scan filenames to the phys_df:
phys_df['scan_fname'] = [sf for sf in scan_fnames]
return phys_df
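# Illustrative sketch (assumption, not from the original module): minimal DataFrames that
# exercise synchronize_onsets. All onsets, durations and filenames are made up; the physio
# triggers lag the scans by a constant 12.5 s, which is the offset the function recovers.
def _demo_synchronize_onsets():
    scan_df = pd.DataFrame({
        'filename': ['sub-01_task-rest_run-01_bold.nii.gz',
                     'sub-01_task-rest_run-02_bold.nii.gz'],
        'onset': [0.0, 300.0],
        'duration': [240.0, 240.0],
    })
    phys_df = pd.DataFrame({
        'index': [0, 1],
        'onset': [12.5, 312.5],
        'duration': [240.0, 240.0],
    })
    return synchronize_onsets(phys_df, scan_df)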
def plot_sync(scan_df, physio_df):
"""
Plot unsynchronized and synchonized scan and physio onsets and durations.
"""
# You need a scan_df already synchronized (so it has the 'phys_onset':
if 'phys_onset' not in scan_df.columns:
raise RuntimeError('The physio data has not been synchronized yet.')
fig, axes = plt.subplots(nrows=2, figsize=(20, 6), sharex=True)
# get max value rounded to nearest 1000
max_ = int(1000 * np.ceil(max((
physio_df['onset'].max(),
scan_df['onset'].max(),
scan_df['phys_onset'].max())) / 1000))
scalar = 10
x = np.linspace(0, max_, (max_*scalar)+1)
# first the raw version
physio_timeseries = np.zeros(x.shape)
func_timeseries = np.zeros(x.shape)
for i, row in scan_df.iterrows():
func_timeseries[
int(row['onset'] * scalar):int((row['onset'] + row['duration']) * scalar)
] = 1
for i, row in physio_df.iterrows():
physio_timeseries[
int(row['onset'] * scalar):int((row['onset'] + row['duration']) * scalar)
] = 0.5
axes[0].fill_between(x, func_timeseries, where=func_timeseries >= 0,
interpolate=True, color='red', alpha=0.3,
label='Functional scans')
axes[0].fill_between(x, physio_timeseries, where=physio_timeseries >= 0,
interpolate=True, color='blue', alpha=0.3,
label='Physio triggers')
# now the adjusted version
physio_timeseries = np.zeros(x.shape)
func_timeseries = np.zeros(x.shape)
for i, row in scan_df.iterrows():
func_timeseries[
int(row['phys_onset'] * scalar):int((row['phys_onset'] + row['duration']) * scalar)
] = 1
for i, row in physio_df.iterrows():
physio_timeseries[
int(row['onset'] * scalar):int((row['onset'] + row['duration']) * scalar)
] = 0.5
axes[1].fill_between(x, func_timeseries, where=func_timeseries >= 0,
interpolate=True, color='red', alpha=0.3,
label='Functional scans')
axes[1].fill_between(x, physio_timeseries, where=physio_timeseries >= 0,
interpolate=True, color='blue', alpha=0.3,
label='Physio triggers')
axes[0].set_xlim((min(x), max(x)))
axes[0].set_ylim((0, None))
axes[1].set_xlabel('Time (s)')
axes[0].legend()
return fig, axes
def determine_scan_durations(layout, scan_df, sub, ses):
"""Extract scan durations by loading fMRI files/metadata and
multiplying TR by number of volumes. This can be used to determine the
endpoints for the physio files.
Parameters
----------
layout : bids.layout.BIDSLayout
Dataset layout. Used to identify functional scans and load them to
determine scan durations.
scan_df : pandas.DataFrame
Scans DataFrame containing functional scan filenames and onset times.
sub : str
Subject ID
ses : str or None, optional
Session ID. If None, then no session.
Returns
-------
scan_df : pandas.DataFrame
Updated DataFrame with new "duration" column. Calculated durations are
in seconds.
"""
# TODO: parse entities in func files for searches instead of larger search.
func_files = layout.get(datatype='func', suffix='bold',
extension=['nii.gz', 'nii'],
subject=sub, session=ses)
scan_df['duration'] = None
for func_file in func_files:
filename = func_file.path
if filename in scan_df['filename'].values:
n_vols = nib.load(func_file.path).shape[3]
tr = func_file.get_metadata()['RepetitionTime']
duration = n_vols * tr
scan_df.loc[scan_df['filename'] == filename, 'duration'] = duration
else:
print('Skipping {}'.format(filename))
return scan_df
def load_scan_data(layout, sub, ses):
"""Extract subject- and session-specific scan onsets and durations from
BIDSLayout.
Start times are relative to the start of the first run.
Times are in seconds.
Parameters
----------
layout : BIDSLayout
Dataset layout. Used to identify functional scans and load them to
determine scan durations.
sub : str
Subject ID
ses : str
Session ID
Returns
-------
df : pandas.DataFrame
DataFrame with the following columns: 'filename', 'acq_time',
'duration', 'onset'.
"""
# This is the strategy we'll use in the future. Commented out for now.
# scans_file = layout.get(extension='tsv', suffix='scans', subject=sub, session=ses)
# df = pd.read_table(scans_file)
# Collect acquisition times:
# NOTE: Will be replaced with scans file if heudiconv makes the change
img_files = layout.get(datatype='func', suffix='bold',
extension=['nii.gz', 'nii'],
subject=sub, session=ses)
df = pd.DataFrame(
{
'filename': [f.path for f in img_files],
'acq_time': [f.get_metadata()['AcquisitionTime'] for f in img_files],
}
)
# Get "first" scan from multi-file acquisitions
df['acq_time'] = pd.to_datetime(df['acq_time'])
df = df.sort_values(by='acq_time')
df = df.drop_duplicates(subset='filename', keep='first', ignore_index=True)
# Now back to general-purpose code
df = determine_scan_durations(layout, df, sub=sub, ses=ses)
df = df.dropna(subset=['duration']) # limit to relevant scans
# Convert scan times to relative onsets (first scan is at 0 seconds)
    df['acq_time'] = pd.to_datetime(df['acq_time'])
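    # Hedged sketch (an assumption, not part of the code above): the relative
    # 'onset' column described in the docstring could be derived from the
    # sorted acquisition times along these lines:
    #   df['onset'] = (df['acq_time'] - df['acq_time'].min()).dt.total_seconds()
    #   return df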
import pandas as pd
import yaml
from flashtext import KeywordProcessor
from neanno.prediction.predictor import KeyTermsPredictor
from neanno.utils.dataset import DatasetManager
from neanno.utils.dict import merge_dict
from neanno.utils.text import extract_annotations_as_generator
class FromDatasetKeyTermsPredictor(KeyTermsPredictor):
""" Predicts key terms of a text by looking up terms in a dataset."""
location_string = None
    dataset = pd.DataFrame(columns=["term", "parent_terms"], dtype=str)
import os
import numpy as np
import pandas as pd
from scipy.optimize import fsolve
import scipy.optimize as optimize
import astropy.units as u
from astropy import constants as const
from platypos.lx_evo_and_flux import flux_at_planet_earth, flux_at_planet
from platypos.mass_evolution_function import mass_planet_RK4_forward_LO14
class Planet_LoFo14():
"""
Need star and planet dictionary to initialize a LoFo planet object.
Structure of star_dictionary: {'star_id': "dummySun", 'mass': mass_star,
'radius': radius_star, 'age': age_star,
'L_bol': L_bol, 'Lx_age': Lx_age}
Structure of planet_dict: {"core_mass": m_c, "fenv": f,
"distance": a, "metallicity": metal}
NOTE: There are actually three options for planet_dict:
Case 1) An artificial planet with given core mass and envelope mass
fraction (M_core & f_env)
-> in this case we need to calculate the mass and radius of
the planet (M_pl & R_pl)
Case 2) An observed planet with a known mass (we have M_pl & R_pl
& f_env) -> M_core
Case 3) An observed planet with radius and mass measurement, plus
core mass is specified
-> need to calculate/estimate envelope mass fraction
Additional Input: Lx_sat_info (only needed if you want to scale the
1 & 5 Gyr X-ray luminosities for non-solar-mass stars
"""
def __init__(self, star_dictionary, planet_dict, Lx_sat_info=None):
'''Initialize a new instance (=object) of Planet_LoFo14
Parameters:
-----------
'''
# initialize stellar parameters
self.star_id = star_dictionary['star_id']
self.mass_star = star_dictionary['mass']
self.radius_star = star_dictionary['radius']
self.age = star_dictionary['age']
self.Lbol = star_dictionary['L_bol'] * const.L_sun.cgs.value
self.Lx_age = star_dictionary['Lx_age']
self.Lx_sat_info = Lx_sat_info
# initialize planet parameters based on the input dictionary
# the following 3 parameters are the same for all planets!
self.distance = planet_dict["distance"]
self.metallicity = planet_dict["metallicity"] # solarZ or enhZ
self.flux = flux_at_planet_earth(self.Lbol, self.distance)
# flag which tracks if planet has been evolved and result file exists
self.has_evolved = False
self.planet_id = "dummy" # set planet name later with set_name
self.Lx_sat_info = Lx_sat_info
# the remaining parameters depend on the input dictionary (Case 1, 2, 3)
while True:
try:
# check if envelope mass fraction is specified, then Case 1
planet_dict["fenv"]
self.planet_info = "Case 1 - artificial planet"\
+ " (with M_core & f_env)"
# Case 1: artificial planet with fenv & M_core given, need to
# calculate the total mass and radius
self.fenv = planet_dict["fenv"]
self.core_mass = planet_dict["core_mass"]
self.calculate_core_radius()
self.calculate_planet_mass()
self.calculate_planet_radius()
break
except KeyError: # if no f_env is provided, then we are dealing
# with Case 2 or 3
while True:
try:
# check if planet mass is provided, then Case 3
planet_dict["mass"]
# Case 3: An observed planet with radius and mass
# measurement, plus core mass is specified;
# need to calculate/estimate envelope mass frac.
self.planet_info = "Case 3 - obs. planet with radius"\
+ " & mass measurement (and core "\
+ "mass estimate)"
self.core_mass = planet_dict["core_mass"]
self.mass = planet_dict["mass"]
self.radius = planet_dict["radius"]
self.calculate_core_radius()
self.solve_for_fenv() # get for envelope mass fraction
# Note to self: add sanity check to make sure the mass
# with the calculated fenv matches the input mass!
# Note to self: add sanity check to make sure the radius
# with the calculated fenv matches the input radius!
break
except KeyError:
# no mass and fenv given -> Case 2
self.planet_info = "Case 2 - obs. planet with"\
+ " radius, but no mass measurement"
# Case 2 - observed planet with a without a mass, but
# core mass estimate, need to calculate fenv
self.core_mass = planet_dict["core_mass"]
self.radius = planet_dict["radius"]
self.calculate_core_radius()
self.solve_for_fenv() # get envelope mass fraction
self.calculate_planet_mass()
# Note to self: add sanity check to make sure the radius
# with the calculated fenv matches the input radius!
break
break
# Class Methods
def calculate_planet_mass(self):
""" Planet mass determined by core and atmosphere mass
(specified in terms of envelope mass fraction [in %]). """
self.mass = self.core_mass/(1-(self.fenv/100))
def calculate_planet_core_mass(self):
""" Core mass determined by planet mass and envelope mass
(specified in terms of envelope mass fraction [%]). """
self.core_mass = self.mass*(1-(self.fenv/100))
def calculate_core_radius(self):
"""M-R relation for rock/iron Earth-like core. (no envelope)"""
self.core_radius = (self.core_mass**0.25)
def solve_for_fenv(self):
""" For known core and planet radius, core mass, age and flux,
solve for envelope mass fraction."""
if self.radius == self.core_radius:
self.fenv = 0.0
else:
def calculate_fenv(fenv):
age_exponent = {"solarZ": -0.11, "enhZ": -0.18}
return -self.radius + self.core_radius + (2.06 \
* (self.core_mass/(1 - (fenv / 100)))**(-0.21) \
* (fenv / 5)**0.59 * (self.flux)**0.044 \
* ((self.age / 1e3) \
/ 5)**(age_exponent[self.metallicity]))
f_guess = 0.1
fenv = optimize.fsolve(calculate_fenv, x0=f_guess)[0]
self.fenv = fenv
# if fenv1 != fenv:
# print("Sth went wrong in solving for\
# the envelope mass fraction! Check!")
def calculate_R_env(self):
""" Check Planet_models_LoFo14.py for details on input and
output parameters;
R_env ~ t**0.18 for *enhanced opacities*;
R_env ~ t**0.11 for *solar metallicities*
"""
age_exponent = {"solarZ": -0.11, "enhZ": -0.18}
R_env = 2.06 * self.mass**(-0.21) * (self.fenv / 5)**0.59 * \
self.flux**0.044 * \
((self.age / 1e3) / 5)**(age_exponent[self.metallicity])
return R_env # in units of R_earth
def calculate_planet_radius(self):
""" Check Planet_models_LoFo14.py for details"""
self.radius = self.core_radius + self.calculate_R_env()
def set_name(self, t_final, initial_step_size,
epsilon, K_on, beta_on, evo_track_dict):
""" OBSOLETE
Function to set the right planet name based on the track specified.
This can then be used to check if a particular planet on a particular
track has already evolved (i.e. if outpufile exists). """
self.planet_id = "planet_" \
+ str(np.round(self.distance, 3)).replace('.', 'dot') \
+ '_Mcore' \
+ str(np.round(self.core_mass, 3)).replace(".", "dot" )\
+ '_fenv' \
+ str(np.round(self.fenv, 3)) + '_' + self.metallicity \
+ '_Mstar' \
+ str(np.round(self.mass_star, 3)).replace(".", "dot") \
+ "_K_" + K_on + "_beta_" + beta_on \
+ "_track_" + str(evo_track_dict["t_start"]) + "_" \
+ str(evo_track_dict["t_sat"]) + "_" + str(t_final) \
+ "_" + str(evo_track_dict["Lx_max"]) + "_" \
+ str(evo_track_dict["dt_drop"]) + "_" \
+ str(evo_track_dict["Lx_drop_factor"])
def generate_planet_id(self, t_final,
planet_folder_id, evo_track_dict):
""" Similar as set_name; produces a planet_id which can be used
for saving the results. """
self.planet_id = planet_folder_id + "_track_" \
+ str(evo_track_dict["t_start"]) + "_" \
+ str(evo_track_dict["t_sat"]) + "_" \
+ str(t_final) + "_" + str(evo_track_dict["Lx_max"]) \
+ "_" + str(evo_track_dict["dt_drop"]) + "_" \
+ str(evo_track_dict["Lx_drop_factor"])
def write_general_params_to_file(self, path_for_saving,
planet_folder_id, evo_track_dict):
"""produces various files:
- a file with the initial planet parameters (name is the
planet folder name, e.g. planet_001.txt),
- a file with the host star parameters (host_star.txt),
- a file with the corresponding track parameters starting with
"track_params_"
"""
# create a file (e.g. planet_XXXX.txt) which contains the
# initial planet params
if not os.path.exists(path_for_saving + planet_folder_id + ".txt"):
with open(path_for_saving + planet_folder_id + ".txt", "w") as p:
planet_params = "a,core_mass,fenv,mass,radius,metallicity,age\n" \
+ str(self.distance) + "," \
+ str(self.core_mass) + "," \
+ str(self.fenv) + "," + str(self.mass) \
+ "," + str(self.radius) + "," \
+ self.metallicity + "," + str(self.age)
p.write(planet_params)
# create a file (track_params_planet_....txt) which contains
# the track parameters
if not os.path.exists(path_for_saving + "track_params_" \
+ self.planet_id + ".txt"):
with open(path_for_saving + "track_params_" + self.planet_id + ".txt", "w") as t:
track_params = "t_start,t_sat,t_curr,t_5Gyr,Lx_max,Lx_curr," \
+ "Lx_5Gyr,dt_drop,Lx_drop_factor\n" \
+ str(evo_track_dict["t_start"]) + "," \
+ str(evo_track_dict["t_sat"]) + "," \
+ str(evo_track_dict["t_curr"]) + "," \
+ str(evo_track_dict["t_5Gyr"]) + "," \
+ str(evo_track_dict["Lx_max"]) + "," \
+ str(evo_track_dict["Lx_curr"]) + "," \
+ str(evo_track_dict["Lx_5Gyr"]) + "," \
+ str(evo_track_dict["dt_drop"]) + "," \
+ str(evo_track_dict["Lx_drop_factor"])
t.write(track_params)
# create a file which contains the host star parameters
if not os.path.exists(path_for_saving + "host_star_properties.txt"):
with open(path_for_saving + "host_star_properties.txt", "w") as s:
star_params = "star_id,mass_star,radius_star,age,Lbol,Lx_age\n" \
+ self.star_id + "," + str(self.mass_star) \
+ "," + str(self.radius_star) \
+ "," + str(self.age) \
+ "," + str(self.Lbol / const.L_sun.cgs.value) \
+ "," + str(self.Lx_age)
s.write(star_params)
def write_final_params_to_file(self, results_df, path_for_saving):
""" Create file with only the final time, mass and radius parameters. """
# create another file, which contains the final parameters only
if not os.path.exists(path_for_saving \
+ self.planet_id + "_final.txt"):
with open(path_for_saving + self.planet_id + "_final.txt", "w") as p:
# get last element (final time, mass, radius)
t_final = results_df["Time"].iloc[-1]
R_final = results_df["Radius"].iloc[-1]
M_final = results_df["Mass"].iloc[-1]
f_env_final = ((M_final - self.core_mass) / M_final) * 100 # %
planet_params = "a,core_mass,time,fenv,mass,radius,metallicity,track\n" \
+ str(self.distance) + "," \
+ str(self.core_mass) + "," \
+ str(t_final) + "," \
+ str(f_env_final) + "," \
+ str(M_final) + "," \
+ str(R_final) + "," \
+ self.metallicity + "," \
+ self.planet_id
p.write(planet_params)
def evolve_forward(self, t_final,
initial_step_size,
epsilon, K_on, beta_on,
evo_track_dict,
path_for_saving,
planet_folder_id):
""" Call this function to make the planet evolve and
create file with mass and radius evolution.
See Mass_evolution_function.py for details on the integration."""
if os.path.exists(path_for_saving + self.planet_id + ".txt"):
# planet already exists
self.has_evolved = True
df = pd.read_csv(path_for_saving + self.planet_id + ".txt")
else:
#print("Planet: ", self.planet_id+".txt")
# call mass_planet_RK4_forward_LO14 to start the integration
t, M, R, Lx = mass_planet_RK4_forward_LO14(
epsilon=epsilon,
K_on=K_on,
beta_on=beta_on,
planet_object=self,
initial_step_size=initial_step_size,
t_final=t_final,
track_dict=evo_track_dict
)
### TO DO: move this to mass_planet_RK4_forward_LO14 -
# > make it return dataframe
# add results to dataframe and save
df = | pd.DataFrame({"Time": t, "Mass": M, "Radius": R, "Lx": Lx}) | pandas.DataFrame |
import pandas as pd
print("Load effectif + siren data")
dfeff = pd.read_csv("../data/simu-effectifs/effectif.csv")
# Set up visualization
import time
from IPython import display
import pandas as pd
from gretel_client.transformers import DataTransformPipeline
def display_df(
df: pd.DataFrame, sleep: float, clear: bool, title: str, title_color: str
):
style = df.style.apply(highlight_tags, cols=["tags"])
if title:
style = style.set_caption(title).set_table_styles(
[
{
"selector": "caption",
"props": [("color", title_color), ("font-size", "14px")],
}
]
)
if clear:
display.clear_output(wait=True)
display.display(style)
time.sleep(sleep)
def highlight_tags(s, cols) -> list:
""" Style the discovered entities
Params:
s : series
cols : list, list of columns to style
"""
color_map = ["#47E0B3", "#F98043", "#50D8F1", "#C18DFC"]
return [
"background-color: {}".format(color_map[ord(str(x)[-1]) % len(color_map) - 1])
if len(str(x)) > 0 and s.name in cols
else ""
for x in s
]
def stream_table_view(
data: dict,
xf: DataTransformPipeline = None,
sleep: float = 0.0,
title: str = None,
title_color: str = "black",
clear: bool = False,
):
"""
Stream a table view into a Jupyter cell
"""
if xf:
transformed = xf.transform_record(data)
df = pd.DataFrame.from_dict(
transformed["record"], orient="index", columns=["field"]
)
df["tags"] = ""
for field, value in transformed["record"].items():
if field in data["record"].keys():
if value != data["record"][field]:
df.at[field, "tags"] = "Transformed"
else:
field_data = data["metadata"]["fields"].get(
str(field), {"ner": {"labels": []}}
)
labels = ", ".join(
[x["label"] for x in field_data["ner"]["labels"]]
)
df.at[field, "tags"] = labels
else:
df.at[field, "tags"] = "Transformed"
else:
# Gretel format record +
        df = pd.DataFrame.from_dict(data["record"], orient="index", columns=["field"])
#!/usr/bin/env python
# coding: utf-8
# In[5]:
from pandas_datareader import data as pdr
from yahoo_fin import stock_info as si
from pandas import ExcelWriter
import yfinance as yf
import pandas as pd
import requests
from datetime import datetime, timedelta, date
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import mplfinance as mpf
import xlrd
import img2pdf
import fnmatch
from PIL import Image
from PyPDF2 import PdfFileMerger, PdfFileReader
from fpdf import FPDF
import os.path
import gc
import json
import math
from matplotlib.backends.backend_pdf import PdfPages
import glob
import sys
from pathlib import Path
import subprocess
import os
matplotlib.use('Agg')
outputPath_name = "C:/Users/Program_data/" # For placing your newly created pdf .py所產生全部.pdf將會進入此folder
pythonPath_name = "C:/Users/Program_data/python" # For locating this .py and the list of tickers(companylist.csv) 你放置python及companylist.csv既地方
total_info_name = "C:/Users/Program_data/total_info.csv" # For logging the stock statistic daily 收集每日.pdf數據作縱貫分析
pythonPath = Path(pythonPath_name)
outputPath = Path(outputPath_name)
# In[6]:
def spawn_program_and_die(program, exit_code=0): # used to kill this script and restart it
"""
Start an external program and exit the script
with the specified return code.
Takes the parameter program, which is a list
that corresponds to the argv of your command.
"""
# Start the external program
subprocess.Popen(program)
# We have started the program, and can suspend this interpreter
sys.exit(exit_code)
if not os.path.exists(outputPath):
os.makedirs(outputPath)
if not os.path.exists(pythonPath):
os.makedirs(pythonPath)
os.chdir(outputPath) # change directory to where the pdf files are
listOfFiles = []
Mon = 0
Tue = 0
Wed = 0
Thu = 0
Fri = 0
try:
    for name in glob.glob("????-??-??.pdf"): # turn the file name into a date, then convert it to a weekday (Mon-Fri)
filename = os.path.splitext(name)[0]
weekday = datetime.strptime(filename, '%Y-%m-%d').strftime('%a')
if weekday == "Fri":
Fri += 1
elif weekday == "Thu":
Thu += 1
elif weekday == "Wed":
Wed += 1
elif weekday == "Tue":
Tue += 1
elif weekday == "Mon":
Mon += 1
except Exception as e:
print(e)
print('Mon:', Mon, ' Tue:', Tue, ' Wed:', Wed,' Thu:', Thu, ' Fri:', Fri)
week_day_number = -5
if Mon == Tue == Wed == Thu == Fri: # find which weekday has the fewest files and start from that one
week_day_number = -5
elif Fri == min(Mon,Tue, Wed, Thu, Fri):
week_day_number = -5
elif Thu == min(Mon,Tue, Wed, Thu, Fri):
week_day_number = -4
elif Wed == min(Mon,Tue, Wed, Thu, Fri):
week_day_number = -3
elif Tue == min(Mon,Tue, Wed, Thu, Fri):
week_day_number = -2
elif Mon == min(Mon,Tue, Wed, Thu, Fri):
week_day_number = -1
# In[7]:
last_trade_day = datetime.utcnow() - timedelta(hours = 1)
last_trade_day = last_trade_day.date() # define the date of the last trading day; at least 9 hours must have passed since midnight of the previous Hong Kong date
offset = (last_trade_day.weekday() + week_day_number)%7
last_weekday = last_trade_day - timedelta(days=offset) # define the most recent date of the least-covered weekday
working = True
i = 0
# In[11]:
while working == True:
print("Selecting trade day...")
try:
        date_study = last_weekday - i*timedelta(days = 7) # step backwards to find a weekday whose data has not been downloaded yet; i starts at 0
daily_file_name = date_study - timedelta(days = 1)
i += 1
yf.pdr_override()
test = os.listdir(outputPath)
for item in test:
if item.endswith(".png"):
os.remove(os.path.join(outputPath, item))
if item.endswith(".jpg"):
os.remove(os.path.join(outputPath, item))
os.chdir(outputPath)
        if os.path.isfile(f'{daily_file_name}.pdf'): # if this date's pdf already exists, continue out of this iteration, back to the previous try, and search further back for a weekday not yet downloaded
continue
else:
            pass # if it does not exist, carry on
os.chdir(pythonPath)
try:
os.remove("stocks.csv")
time.sleep(1)
except Exception as e:
print(e)
data = pd.read_csv("companylist.csv", header=0)
stocklist = list(data.Symbol)
final = []
index = []
rs = []
n = -1
adv = 0
decl = 0
new_high = 0
new_low = 0
c_20 = 0
c_50 = 0
s_20_50 = 0
s_50_200 = 0
s_200_200_20 = 0
s_50_150_200 = 0
index_list = []
stocks_fit_condition = 0
stock_name = []
gauge = 0
        exportList = pd.DataFrame(columns=['Stock', "RS_Rating", "50 Day MA", "150 Day Ma", "200 Day MA", "52 Week Low", "52 week High"])
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Defines a helpful dataset wrapper to allow operations such as summarizing data, taking the subset or sampling."""
import pandas as pd
import scipy as sp
import numpy as np
from ..common.explanation_utils import _summarize_data, _generate_augmented_data
from ..common.explanation_utils import module_logger
from ..common.constants import Defaults
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Starting from version 2.2.1', UserWarning)
from shap.common import DenseData
class DatasetWrapper(object):
"""A wrapper around a dataset to make dataset operations more uniform across explainers.
:param dataset: A matrix of feature vector examples (# examples x # features) for
initializing the explainer.
:type dataset: numpy.array or pandas.DataFrame or iml.datatypes.DenseData or
scipy.sparse.csr_matrix
"""
def __init__(self, dataset):
"""Initialize the dataset wrapper.
:param dataset: A matrix of feature vector examples (# examples x # features) for
initializing the explainer.
:type dataset: numpy.array or pandas.DataFrame or iml.datatypes.DenseData or
scipy.sparse.csr_matrix
"""
self._features = None
self._original_dataset_with_type = dataset
self._dataset_is_df = isinstance(dataset, pd.DataFrame)
self._dataset_is_series = isinstance(dataset, pd.Series)
if self._dataset_is_df:
self._features = dataset.columns.values.tolist()
if self._dataset_is_df or self._dataset_is_series:
dataset = dataset.values
self._dataset = dataset
self._original_dataset = dataset
self._summary_dataset = None
self._column_indexer = None
self._subset_taken = False
self._summary_computed = False
self._string_indexed = False
self._one_hot_encoded = False
self._one_hot_encoder = None
@property
def dataset(self):
"""Get the dataset.
:return: The underlying dataset.
:rtype: numpy.array or iml.datatypes.DenseData or scipy.sparse.csr_matrix
"""
return self._dataset
@property
def typed_dataset(self):
"""Get the dataset in the original type, pandas DataFrame or Series.
:return: The underlying dataset.
:rtype: numpy.array or pandas.DataFrame or pandas.Series or iml.datatypes.DenseData or scipy.sparse matrix
"""
wrapper_func = self.typed_wrapper_func
return wrapper_func(self._dataset)
def typed_wrapper_func(self, dataset):
"""Get a wrapper function to convert the dataset to the original type, pandas DataFrame or Series.
:param dataset: The dataset to convert to original type.
:type dataset: numpy.array or scipy.sparse.csr_matrix
:return: A wrapper function for a given dataset to convert to original type.
:rtype: function that outputs the original type
"""
if self._dataset_is_df:
if len(dataset.shape) == 1:
dataset = dataset.reshape(1, dataset.shape[0])
original_dtypes = self._original_dataset_with_type.dtypes
            return pd.DataFrame(dataset, columns=self._features)
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 14:20:02 2020
@author: aliha
@twitter: rockingAli5
"""
import time
import pandas as pd
pd.options.mode.chained_assignment = None
import json
from bs4 import BeautifulSoup as soup
import re
from collections import OrderedDict
import datetime
from datetime import datetime as dt
import itertools
import numpy as np
try:
from tqdm import trange
except ModuleNotFoundError:
pass
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
TRANSLATE_DICT = {'Jan': 'Jan',
'Feb': 'Feb',
'Mac': 'Mar',
'Apr': 'Apr',
'Mei': 'May',
'Jun': 'Jun',
'Jul': 'Jul',
'Ago': 'Aug',
'Sep': 'Sep',
'Okt': 'Oct',
'Nov': 'Nov',
'Des': 'Dec',
'Jan': 'Jan',
'Feb': 'Feb',
'Mar': 'Mar',
'Apr': 'Apr',
'May': 'May',
'Jun': 'Jun',
'Jul': 'Jul',
'Aug': 'Aug',
'Sep': 'Sep',
'Oct': 'Oct',
'Nov': 'Nov',
'Dec': 'Dec'}
main_url = 'https://1xbet.whoscored.com/'
def getLeagueUrls(minimize_window=True):
driver = webdriver.Chrome('chromedriver.exe')
if minimize_window:
driver.minimize_window()
driver.get(main_url)
league_names = []
league_urls = []
for i in range(21):
league_name = driver.find_element_by_xpath('//*[@id="popular-tournaments-list"]/li['+str(i+1)+']/a').text
league_link = driver.find_element_by_xpath('//*[@id="popular-tournaments-list"]/li['+str(i+1)+']/a').get_attribute('href')
league_names.append(league_name)
league_urls.append(league_link)
for link in league_urls:
if 'Russia' in link:
r_index = league_urls.index(link)
league_names[r_index] = 'Russian Premier League'
leagues = {}
for name,link in zip(league_names,league_urls):
leagues[name] = link
driver.close()
return leagues
def getMatchUrls(comp_urls, competition, season, maximize_window=True):
driver = webdriver.Chrome('chromedriver.exe')
if maximize_window:
driver.maximize_window()
comp_url = comp_urls[competition]
driver.get(comp_url)
time.sleep(5)
seasons = driver.find_element_by_xpath('//*[@id="seasons"]').get_attribute('innerHTML').split(sep='\n')
seasons = [i for i in seasons if i]
for i in range(1, len(seasons)+1):
if driver.find_element_by_xpath('//*[@id="seasons"]/option['+str(i)+']').text == season:
driver.find_element_by_xpath('//*[@id="seasons"]/option['+str(i)+']').click()
time.sleep(5)
try:
stages = driver.find_element_by_xpath('//*[@id="stages"]').get_attribute('innerHTML').split(sep='\n')
stages = [i for i in stages if i]
all_urls = []
for i in range(1, len(stages)+1):
if competition == 'Champions League' or competition == 'Europa League':
if 'Group Stages' in driver.find_element_by_xpath('//*[@id="stages"]/option['+str(i)+']').text or 'Final Stage' in driver.find_element_by_xpath('//*[@id="stages"]/option['+str(i)+']').text:
driver.find_element_by_xpath('//*[@id="stages"]/option['+str(i)+']').click()
time.sleep(5)
driver.execute_script("window.scrollTo(0, 400)")
match_urls = getFixtureData(driver)
match_urls = getSortedData(match_urls)
match_urls2 = [url for url in match_urls if '?' not in url['date'] and '\n' not in url['date']]
all_urls += match_urls2
else:
continue
elif competition == 'Major League Soccer':
if 'Grp. ' not in driver.find_element_by_xpath('//*[@id="stages"]/option['+str(i)+']').text:
driver.find_element_by_xpath('//*[@id="stages"]/option['+str(i)+']').click()
time.sleep(5)
driver.execute_script("window.scrollTo(0, 400)")
match_urls = getFixtureData(driver)
match_urls = getSortedData(match_urls)
match_urls2 = [url for url in match_urls if '?' not in url['date'] and '\n' not in url['date']]
all_urls += match_urls2
else:
continue
else:
driver.find_element_by_xpath('//*[@id="stages"]/option['+str(i)+']').click()
time.sleep(5)
driver.execute_script("window.scrollTo(0, 400)")
match_urls = getFixtureData(driver)
match_urls = getSortedData(match_urls)
match_urls2 = [url for url in match_urls if '?' not in url['date'] and '\n' not in url['date']]
all_urls += match_urls2
except NoSuchElementException:
all_urls = []
driver.execute_script("window.scrollTo(0, 400)")
match_urls = getFixtureData(driver)
match_urls = getSortedData(match_urls)
match_urls2 = [url for url in match_urls if '?' not in url['date'] and '\n' not in url['date']]
all_urls += match_urls2
remove_dup = [dict(t) for t in {tuple(sorted(d.items())) for d in all_urls}]
all_urls = getSortedData(remove_dup)
driver.close()
return all_urls
season_names = [re.search(r'\>(.*?)\<',season).group(1) for season in seasons]
driver.close()
print('Seasons available: {}'.format(season_names))
    raise ValueError('Season Not Found.')
def getTeamUrls(team, match_urls):
team_data = []
for fixture in match_urls:
if fixture['home'] == team or fixture['away'] == team:
team_data.append(fixture)
team_data = [a[0] for a in itertools.groupby(team_data)]
return team_data
def getMatchesData(match_urls, minimize_window=True):
matches = []
driver = webdriver.Chrome('chromedriver.exe')
if minimize_window:
driver.minimize_window()
try:
for i in trange(len(match_urls), desc='Getting Match Data'):
# recommended to avoid getting blocked by incapsula/imperva bots
time.sleep(7)
match_data = getMatchData(driver, main_url+match_urls[i]['url'], display=False, close_window=False)
matches.append(match_data)
except NameError:
print('Recommended: \'pip install tqdm\' for a progress bar while the data gets scraped....')
time.sleep(7)
for i in range(len(match_urls)):
match_data = getMatchData(driver, main_url+match_urls[i]['url'], display=False, close_window=False)
matches.append(match_data)
driver.close()
return matches
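# Illustrative end-to-end flow for the helpers above (added sketch; the
# competition key and season string are assumptions and must match what
# whoscored.com lists):
#   leagues = getLeagueUrls()
#   fixtures = getMatchUrls(leagues, 'Premier League', '2020/2021')
#   team_fixtures = getTeamUrls('Liverpool', fixtures)
#   matches = getMatchesData(team_fixtures)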
def getFixtureData(driver):
matches_ls = []
while True:
table_rows = driver.find_elements_by_class_name('divtable-row')
if len(table_rows) == 0:
break
for row in table_rows:
match_dict = {}
element = soup(row.get_attribute('innerHTML'), features='lxml')
link_tag = element.find("a", {"class":"result-1 rc"})
if type(link_tag) is type(None):
if type(element.find('span', {'class':'status-1 rc'})) is type(None):
date = row.text.split(', ')[-1]
if type(link_tag) is not type(None):
match_dict['date'] = date
match_dict['time'] = element.find('div', {'class':'col12-lg-1 col12-m-1 col12-s-0 col12-xs-0 time divtable-data'}).text
match_dict['home'] = element.find_all("a", {"class":"team-link"})[0].text
match_dict['away'] = element.find_all("a", {"class":"team-link"})[1].text
match_dict['score'] = element.find("a", {"class":"result-1 rc"}).text
match_dict['url'] = link_tag.get("href")
matches_ls.append(match_dict)
prev_month = driver.find_element_by_xpath('//*[@id="date-controller"]/a[1]').click()
time.sleep(2)
if driver.find_element_by_xpath('//*[@id="date-controller"]/a[1]').get_attribute('title') == 'No data for previous week':
table_rows = driver.find_elements_by_class_name('divtable-row')
for row in table_rows:
match_dict = {}
element = soup(row.get_attribute('innerHTML'), features='lxml')
link_tag = element.find("a", {"class":"result-1 rc"})
if type(link_tag) is type(None):
if type(element.find('span', {'class':'status-1 rc'})) is type(None):
date = row.text.split(', ')[-1]
if type(link_tag) is not type(None):
match_dict['date'] = date
match_dict['time'] = element.find('div', {'class':'col12-lg-1 col12-m-1 col12-s-0 col12-xs-0 time divtable-data'}).text
match_dict['home'] = element.find_all("a", {"class":"team-link"})[0].text
match_dict['away'] = element.find_all("a", {"class":"team-link"})[1].text
match_dict['score'] = element.find("a", {"class":"result-1 rc"}).text
match_dict['url'] = link_tag.get("href")
matches_ls.append(match_dict)
break
matches_ls = list(filter(None, matches_ls))
return matches_ls
def translateDate(data):
for match in data:
date = match['date'].split()
match['date'] = ' '.join([TRANSLATE_DICT[date[0]], date[1], date[2]])
return data
def getSortedData(data):
try:
data = sorted(data, key = lambda i: dt.strptime(i['date'], '%b %d %Y'))
return data
except ValueError:
data = translateDate(data)
data = sorted(data, key = lambda i: dt.strptime(i['date'], '%b %d %Y'))
return data
def getMatchData(driver, url, display=True, close_window=True):
driver.get(url)
# get script data from page source
script_content = driver.find_element_by_xpath('//*[@id="layout-wrapper"]/script[1]').get_attribute('innerHTML')
# clean script content
script_content = re.sub(r"[\n\t]*", "", script_content)
script_content = script_content[script_content.index("matchId"):script_content.rindex("}")]
# this will give script content in list form
script_content_list = list(filter(None, script_content.strip().split(', ')))
metadata = script_content_list.pop(1)
# string format to json format
match_data = json.loads(metadata[metadata.index('{'):])
keys = [item[:item.index(':')].strip() for item in script_content_list]
values = [item[item.index(':')+1:].strip() for item in script_content_list]
for key,val in zip(keys, values):
match_data[key] = json.loads(val)
# get other details about the match
region = driver.find_element_by_xpath('//*[@id="breadcrumb-nav"]/span[1]').text
league = driver.find_element_by_xpath('//*[@id="breadcrumb-nav"]/a').text.split(' - ')[0]
season = driver.find_element_by_xpath('//*[@id="breadcrumb-nav"]/a').text.split(' - ')[1]
if len(driver.find_element_by_xpath('//*[@id="breadcrumb-nav"]/a').text.split(' - ')) == 2:
competition_type = 'League'
competition_stage = ''
elif len(driver.find_element_by_xpath('//*[@id="breadcrumb-nav"]/a').text.split(' - ')) == 3:
competition_type = 'Knock Out'
competition_stage = driver.find_element_by_xpath('//*[@id="breadcrumb-nav"]/a').text.split(' - ')[-1]
else:
print('Getting more than 3 types of information about the competition.')
match_data['region'] = region
match_data['league'] = league
match_data['season'] = season
match_data['competitionType'] = competition_type
match_data['competitionStage'] = competition_stage
# sort match_data dictionary alphabetically
match_data = OrderedDict(sorted(match_data.items()))
match_data = dict(match_data)
if display:
print('Region: {}, League: {}, Season: {}, Match Id: {}'.format(region, league, season, match_data['matchId']))
if close_window:
driver.close()
return match_data
def createEventsDF(data):
events = data['events']
for event in events:
event.update({'matchId' : data['matchId'],
'startDate' : data['startDate'],
'startTime' : data['startTime'],
'score' : data['score'],
'ftScore' : data['ftScore'],
'htScore' : data['htScore'],
'etScore' : data['etScore'],
'venueName' : data['venueName'],
'maxMinute' : data['maxMinute']})
events_df = pd.DataFrame(events)
# clean period column
events_df['period'] = pd.json_normalize(events_df['period'])['displayName']
# clean type column
events_df['type'] = pd.json_normalize(events_df['type'])['displayName']
# clean outcomeType column
events_df['outcomeType'] = pd.json_normalize(events_df['outcomeType'])['displayName']
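    # e.g. (added illustration with an invented dict): pd.json_normalize
    # flattens the WhoScored-style {'value': ..., 'displayName': ...} dicts, so
    #   pd.json_normalize([{'value': 1, 'displayName': 'FirstHalf'}])['displayName']
    # yields a Series containing 'FirstHalf', which replaces the raw dict column.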
    # clean cardType column
try:
x = events_df['cardType'].fillna({i: {} for i in events_df.index})
        events_df['cardType'] = pd.json_normalize(x)
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, option_context
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(norows_df.dtypes, pd.Series(object, index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool_), ("c", np.float64)])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_singlerow_slice_categoricaldtype_gives_series(self):
# GH29521
df = pd.DataFrame({"x": pd.Categorical("a b c d e".split())})
result = df.iloc[0]
raw_cat = pd.Categorical(["a"], categories=["a", "b", "c", "d", "e"])
expected = pd.Series(raw_cat, index=["x"], name=0, dtype="category")
tm.assert_series_equal(result, expected)
def test_timedeltas(self):
df = DataFrame(
dict(
A=Series(date_range("2012-1-1", periods=3, freq="D")),
B=Series([timedelta(days=i) for i in range(3)]),
)
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
            ([pd.Interval(left=0, right=5)]),
import numpy as np
import pandas as pd
def generate_gbm_returns(mu, sigma, samples, scenarios, random_state=0):
"""
:param mu: vector of expected returns
:param sigma: vector of variances - returns are assumed uncorrelated
:param samples: number of samples in each scenario
:param scenarios: number of scenarios
:param random_state: random seed
:return: a data frame of sample returns of shape (len(mu), samples, scenarios)
"""
#TODO
pass
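    # A possible shape for this TODO (added sketch, not the original
    # implementation): draw uncorrelated normal returns per asset with a seeded
    # generator, treating sigma as a vector of variances as the docstring says:
    #   rng = np.random.default_rng(random_state)
    #   rets = rng.normal(loc=np.asarray(mu),
    #                     scale=np.sqrt(np.asarray(sigma)),
    #                     size=(scenarios, samples, len(mu)))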
# TODO: use random generator with seed
def geometric_brownian_motion(mu, sigma,
years, scenarios,
initial_price=1.0,
steps_per_year=12,
prices=True):
"""
Evolution of stock price using a Geometric Brownian Motion model
Generates an ensemble of time series of prices according to GBM
:param mu: the mean drift
:param sigma: the price volatility
:param years: number of years to simulate
:param steps_per_year: Number of periods per year
:param scenarios: number of sample paths to simulate
:param initial_price: initial price
:param prices: return prices if True, returns if False
:return: A data frame with all the price (or return) sample paths
"""
dt = 1 / steps_per_year
n_steps = int(years * steps_per_year)
rets_plus_1 = np.random.normal(size=(n_steps, scenarios), loc=1 + mu * dt, scale=sigma * np.sqrt(dt))
# fix the first row
rets_plus_1[0] = 1
    return initial_price * pd.DataFrame(rets_plus_1).cumprod() if prices else pd.DataFrame(rets_plus_1 - 1)
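# Illustrative call (added sketch): 3 years of monthly prices for 100 scenarios
# with a 7% annual drift and 15% annual volatility:
#   prices = geometric_brownian_motion(mu=0.07, sigma=0.15,
#                                      years=3, scenarios=100,
#                                      initial_price=100.0)
#   prices.shape  # (36, 100): one row per monthly step, one column per scenario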
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
                tm.assert_index_equal(rng, expected)
#################################################################################
#### Package
#################################################################################
import datetime
import time
import sys
####
from tensorflow import keras
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, pooling
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
####
import tensorflow as tf
from tensorflow.keras import backend as K
from tf_keras_vis.utils import normalize
from tf_keras_vis.gradcam import GradcamPlusPlus
####
from sklearn.metrics import roc_curve, auc
from numpy import interp
from itertools import cycle
import os
####
#np_load_old = np.load
#np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
####
f_list = os.listdir('../results/')
dir_name = 'combi_cnn'
if dir_name not in f_list:
os.system("mkdir ../results/"+dir_name)
os.system("mkdir ../results/"+dir_name+'/gcam/')
else:
os.system("rm -r ../results/"+dir_name)
os.system("mkdir ../results/"+dir_name)
os.system("mkdir ../results/"+dir_name+'/gcam/')
start = time.time()
#################################################################################
#### CNN - Classification of tumor & normal cells using gene-combinations
#################################################################################
X_nor = np.load("../results/training_normal.npy")
X_abn = np.load("../results/training_tumor.npy")
Y_nor = np.ndarray(shape=(X_nor.shape[0], 1))
for i in range(X_nor.shape[0]):
Y_nor[i] = 0
Y_abn = np.ndarray(shape=(X_abn.shape[0], 1))
for i in range(Y_abn.shape[0]):
Y_abn[i] = 1
X_nor = X_nor.reshape(X_nor.shape[0], 4, 9900, 1)
X_abn = X_abn.reshape(X_abn.shape[0], 4, 9900, 1)
X_nor = X_nor.astype('float')
X_abn = X_abn.astype('float')
print(X_nor.shape)
print(X_abn.shape)
X_all = np.concatenate((X_nor, X_abn), axis=0)
Y_all = np.concatenate((Y_nor, Y_abn), axis=0).astype('int')
print (X_all.shape)
print (Y_all.shape)
tmp = np.zeros((Y_all.shape[0], 2))
for i in range(Y_all.shape[0]):
loci = int(Y_all[i])
tmp[i][loci] = 1
Y_all = tmp
####
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
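# Worked example (added sanity check with invented values): for a single sample
# with y_true=[[0, 1]] and y_pred=[[0.1, 0.9]], the rounded prediction matches
# the label, so precision_m, recall_m and f1_m all evaluate to ~1.0 (off only
# by K.epsilon()).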
####
line_final = list()
for j in range(10):
count = str(j+1)
X_tmp, X_test, Y_tmp, Y_test = train_test_split(X_all, Y_all, test_size=0.2)
X_train, X_validation, Y_train, Y_validation = train_test_split(X_tmp, Y_tmp, test_size=0.25)
####
model = Sequential()
model.add(Conv2D(32, (2,1), strides=(2,1), activation='relu', input_shape=(4,9900,1)))
model.add(Conv2D(filters=32, kernel_size=(2,1), strides=(1,1), activation='relu'))
model.add(pooling.MaxPooling2D(pool_size=(1,11)))
model.add(Flatten())
model.add(Dense(450, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(45, activation='relu'))
model.add(Dense(2, activation='sigmoid', name="visualized_layer"))
#### Compile the model ####
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy',f1_m, precision_m, recall_m])
#### Training & Test - Fit the model ####
history = model.fit(X_train, Y_train, validation_data=(X_validation, Y_validation), epochs=10, batch_size=500, verbose=1)
#### Evaluate the model ####
loss, accuracy, f1_score, precision, recall = model.evaluate(X_test, Y_test, verbose=1)
#### Save stat and model ####
stat = 'loss\t'+str(loss)+'\n'+'accuracy\t'+str(accuracy)+'\n'+'f1_score\t'+\
str(f1_score)+'\n'+'precision\t'+str(precision)+'\n'+'recall\t'+str(recall)
f_out = open("../results/combi_cnn/combi-cnn_"+count+"_stat.txt","w")
f_out.write(stat)
f_out.close()
model.save('../results/combi_cnn/combi-cnn_'+count+'.h5')
#################################################################################
#### Save performance result.
#################################################################################
print('CNN Round : ',count)
line_final.append('\n# Round No.'+str(count))
Y_pred_keras = model.predict(X_test)
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(2):
fpr[i], tpr[i], _ = roc_curve(Y_test[:, i], Y_pred_keras[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
#### Micro average ####
fpr["micro"], tpr["micro"], _ = roc_curve(Y_test.ravel(), Y_pred_keras.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
#### Macro average ####
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(2)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(2):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= 2
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
AUC_val = roc_auc["micro"]
cnt_nor = len(X_nor)
cnt_tum = len(X_abn)
print(AUC_val,cnt_nor,cnt_tum)
line_ext = [line.strip() for line in open('../results/combi_cnn/combi-cnn_'+count+'_stat.txt')]
line_ext.append('AUC\t'+str(AUC_val))
line_ext.append('Tumor/Normal\t'+str(cnt_tum)+'/'+str(cnt_nor))
f_out = open('../results/combi_cnn/combi-cnn_'+count+'_stat.txt',"w")
f_out.write('\n'.join(line_ext))
f_out.close()
line_final = line_final+line_ext
#################################################################################
#### GradCAM++ visualization
#################################################################################
f_list = os.listdir('../results/combi_cnn/gcam/')
dir_name = 'gcam_'+count
if dir_name not in f_list:
os.system("mkdir ../results/combi_cnn/gcam/"+dir_name)
else:
os.system("rm -r ../results/combi_cnn/gcam/"+dir_name)
os.system("mkdir ../results/combi_cnn/gcam/"+dir_name)
def loss(output):
return output[0][1]
def model_modifier(m):
m.layers[-1].activation = tf.keras.activations.linear
return m
col_names = [line.strip() for line in open("../results/genes_shuffle.txt")]
for i in range(X_abn.shape[0]):
if i%1000 == 0:
print(i)
input_image = X_abn[i]
gradcam = GradcamPlusPlus(model, model_modifier, clone=False)
result = normalize(gradcam(loss, input_image, penultimate_layer=-1))
result = | pd.DataFrame(result[0]) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import os
import tempfile
import skbio
import pandas as pd
import qiime2
from qiime2.util import redirected_stdio
import numpy as np
from q2_diversity import adonis
import pandas.testing as pdt
from qiime2.plugin.testing import TestPluginBase
class AdonisTests(TestPluginBase):
package = 'q2_diversity'
def setUp(self):
super().setUp()
self.dm = skbio.DistanceMatrix(
[[0, 0.5, 1], [0.5, 0, 0.75], [1, 0.75, 0]],
ids=['sample1', 'sample2', 'sample3'])
def test_execute_and_validate_output(self):
md = qiime2.Metadata(pd.DataFrame(
[[1, 'a'], [1, 'b'], [2, 'b']], columns=['number', 'letter'],
index=pd.Index(['sample1', 'sample2', 'sample3'], name='id')))
exp = pd.DataFrame(
[[1.0, 0.322916667, 0.322916667, 0.0, 0.534482759, 1.0],
[1.0, 0.281250000, 0.281250000, 0.0, 0.465517241, 1.0],
[0.0, -1.403048e-18, -np.Infinity, np.nan, -2.322286e-18, np.nan],
[2.0, 0.604166667, np.nan, np.nan, 1.0, np.nan]],
columns=['Df', 'SumsOfSqs', 'MeanSqs', 'F.Model', 'R2', 'Pr(>F)'],
index=['letter', 'number', 'Residuals', 'Total'])
with tempfile.TemporaryDirectory() as temp_dir_name:
adonis(temp_dir_name, self.dm, md, 'letter+number')
with open(os.path.join(temp_dir_name, 'adonis.tsv'), 'r') as fh:
res = pd.read_csv(fh, sep='\t')
pdt.assert_frame_equal(
res, exp, check_dtype=False, check_frame_type=False)
def test_adonis_handles_single_quotes_in_metadata(self):
md = qiime2.Metadata(pd.DataFrame(
[[1, 'a\'s'], [1, 'b\'s'], [2, 'b\'s'], [2, 'a\'s']],
columns=['number', 'letter'],
index=pd.Index(['sample1', 'sample2', 'sample3', 'F'], name='id')))
with tempfile.TemporaryDirectory() as temp_dir_name:
adonis(temp_dir_name, self.dm, md, 'letter+number')
def test_metadata_is_superset(self):
md = qiime2.Metadata(pd.DataFrame(
[[1, 'a'], [1, 'b'], [2, 'b'], [2, 'a']],
columns=['number', 'letter'],
index=pd.Index(['sample1', 'sample2', 'sample3', 'F'], name='id')))
with tempfile.TemporaryDirectory() as temp_dir_name:
adonis(temp_dir_name, self.dm, md, 'letter+number')
def test_metadata_is_subset(self):
md = qiime2.Metadata(pd.DataFrame(
[[1, 'a'], [1, 'b'], [2, 'b']], columns=['number', 'letter'],
index= | pd.Index(['sample1', 'sample2', 'peanuts'], name='id') | pandas.Index |
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import glob
import logging
import os
import pandas as pd
from ts_datasets.base import BaseDataset
logger = logging.getLogger(__name__)
class ETT(BaseDataset):
"""
    ETT (Electricity Transformer Temperature): the ETT is a crucial indicator in long-term
    electric power deployment. Two years of data were collected from two separate counties in China.
- source: https://github.com/zhouhaoyi/ETDataset
- contains one 7-variable time series, target is OT (oil temp)
"""
def __init__(self, rootdir=None):
"""
:param rootdir: The root directory at which the dataset can be found.
"""
super().__init__()
fnames = ["https://jgoode.s3.amazonaws.com/ts-datasets/ETTh1.csv"]
start_timestamp = "2016-07-01 00:00:00"
for i, fn in enumerate(sorted(fnames)):
df = pd.read_csv(fn, index_col="date", parse_dates=True)
df = df[df.index >= start_timestamp]
# put the target at the beginning
df = df.loc[:, ["OT", "HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]]
df.index.rename("timestamp", inplace=True)
assert isinstance(df.index, pd.DatetimeIndex)
df.sort_index(inplace=True)
self.time_series.append(df)
self.metadata.append(
{
# punt on this for now
"trainval": | pd.Series(df.index <= start_timestamp, index=df.index) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import scipy.stats.distributions as dist
import os
from tqdm import trange
######################################################################################################
urls = []
for i in range(36):
urls.append(f"https://censusindia.gov.in/2011census/C-17/DDW-C17-{i:02}00.XLSX")
percentage_and_p_value1,percentage_and_p_value2,percentage_and_p_value3 = [],[],[]
for i in trange(36):
df = pd.read_excel(urls[i])
lang_pop = df.iloc[:,[5,6,10,11,15,16]].fillna(0).iloc[5:].sum(axis = 0)
total_state_pop = df.iloc[:,[5,6]].fillna(0).iloc[5:].sum(axis = 0)
exactly_one_male = lang_pop[0] - lang_pop[2]
two_male = lang_pop[2] - lang_pop[4]
three_male = lang_pop[4]
exactly_one_female = lang_pop[1] - lang_pop[3]
two_female = lang_pop[3] - lang_pop[5]
three_female = lang_pop[5]
## for exactly 1 languages
male_percentage, female_percentage = exactly_one_male/total_state_pop[0]*100, exactly_one_female/total_state_pop[1]*100
## calculating p-value
p1 = exactly_one_male/total_state_pop[0]
p2 = exactly_one_female/total_state_pop[1]
p = (exactly_one_male+exactly_one_female)/sum(total_state_pop)
n1 = total_state_pop[0]
n2 = total_state_pop[1]
z = (p1-p2)/(np.sqrt(p*(1-p)*(1/n1 + 1/n2)))
p_value = 2*min(dist.norm.cdf(-np.abs(z)), dist.norm.cdf(np.abs(z)))
percentage_and_p_value1.append([male_percentage,female_percentage,p_value])
## for exactly 2 languages
male_percentage, female_percentage = two_male/total_state_pop[0]*100, two_female/total_state_pop[1]*100
## calculating p-value
p1 = two_male/total_state_pop[0]
p2 = two_female/total_state_pop[1]
p = (two_male+two_female)/sum(total_state_pop)
n1 = total_state_pop[0]
n2 = total_state_pop[1]
z = (p1-p2)/(np.sqrt(p*(1-p)*(1/n1 + 1/n2)))
p_value = 2*min(dist.norm.cdf(-np.abs(z)), dist.norm.cdf(np.abs(z)))
percentage_and_p_value2.append([male_percentage,female_percentage,p_value])
## for 3 or more languages
male_percentage, female_percentage = three_male/total_state_pop[0]*100, three_female/total_state_pop[1]*100
## calculating p-value
p1 = three_male/total_state_pop[0]
p2 = three_female/total_state_pop[1]
p = (three_male+three_female)/sum(total_state_pop)
n1 = total_state_pop[0]
n2 = total_state_pop[1]
z = (p1-p2)/(np.sqrt(p*(1-p)*(1/n1 + 1/n2)))
p_value = 2*min(dist.norm.cdf(-np.abs(z)), dist.norm.cdf(np.abs(z)))
percentage_and_p_value3.append([male_percentage,female_percentage,p_value])
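# Illustrative refactor (not part of the original script): the two-proportion z-test repeated
# above, wrapped in a helper. x1/x2 are the counts meeting the criterion in each group and
# n1/n2 are the group sizes; the two-sided p-value 2*cdf(-|z|) is equivalent to the
# 2*min(cdf(-|z|), cdf(|z|)) form used above.
def two_proportion_z_test(x1, n1, x2, n2):
    p1, p2 = x1 / n1, x2 / n2
    p_pooled = (x1 + x2) / (n1 + n2)
    z = (p1 - p2) / np.sqrt(p_pooled * (1 - p_pooled) * (1 / n1 + 1 / n2))
    return 2 * dist.norm.cdf(-np.abs(z))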
final_df1 = | pd.DataFrame(percentage_and_p_value1) | pandas.DataFrame |
"""Script to add interval on GVA and population file
Run script on 'data' folder in scenarios_not_extracted folder
"""
import os
import pandas as pd
import numpy as np
from energy_demand.basic import lookup_tables
from energy_demand.basic import basic_functions
def run(
path_to_folder,
path_MSOA_baseline,
MSOA_calculations=False,
geography_name='region',
scenarios_to_generate=[]
):
"""
path_to_folder : str
Path to data folder
path_MSOA_baseline : str
Path to MSOA file with correct geography in csv
"""
sectors_to_generate = [2, 3, 4, 5, 6, 8, 9, 29, 11, 12, 10, 15, 14, 19, 17, 40, 41, 28, 35, 23, 27]
# Get all folders with scenario run results
all_csv_folders = basic_functions.get_all_folders_files(path_to_folder)
# Lookup of economic sectors
LAD_MSOA_lu = lookup_tables.lad_msoa_mapping()
base_yr = 2015
end_yr = 2050
# ---------------------------------------------------------------------------------------------------
# Create scenario with CONSTANT (2015) population and CONSTANT GVA
# ---------------------------------------------------------------------------------------------------
'''
empty_folder_name = os.path.join(path_to_folder, "constant_pop_gva")
basic_functions.delete_folder(empty_folder_name)
os.makedirs(empty_folder_name)
wrote_out_pop, wroute_out_GVA = False, False #Do not change
# Creat empty dataframe
columns = ['timestep', 'sector', 'lad_uk_2016', 'value']
# Get folder with standard scenario to get data for constant scenario
for folder_name in ['pop-baseline16_econ-c16_fuel-c16']:
all_files = os.listdir(os.path.join(path_to_folder, folder_name))
# Scale for every year according to this distribution
for file_name in all_files:
filename_split = file_name.split("__")
if (filename_split[0] == "gva_per_head" and filename_split[1] == 'lad_sector.csv') or (
filename_split[0] == "population" and filename_split[1] == 'lad.csv'):
file_path = os.path.join(path_to_folder, folder_name, file_name)
print("Change file: " + str(file_path))
# Read csv file
gp_file = pd.read_csv(file_path)
# Replace future pop with 2015 pop
gp_file_selection_2015 = gp_file.loc[gp_file['year'] == 2015] #Data of 2015
list_with_all_vals = []
for year in range(base_yr, end_yr + 1):
gp_file_selection_yr = gp_file_selection_2015
gp_file_selection_yr['year'] = year
list_with_all_vals += gp_file_selection_yr.values.tolist()
# Save as file
new_dataframe = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
file_path_out = os.path.join(empty_folder_name, file_name)
new_dataframe.to_csv(file_path_out, index=False) #Index prevents writing index rows
# ---
# MSOA pop calculation
# ----
if MSOA_calculations:
if (filename_split[0] == "population" and filename_split[1] == 'lad.csv'):
# Calculate relative pop percentage of ONS scenarios
msoa_principalDF = pd.read_csv(path_MSOA_baseline)
msoa_principalDF_selection_2015 = msoa_principalDF.loc[msoa_principalDF['year'] == 2015]
# LADs and calculate factor per MSOA
factor_msoas = {}
for lad, msoas in LAD_MSOA_lu.items():
tot_pop_lad = 0
for msoa in msoas:
tot_pop_lad += float(msoa_principalDF_selection_2015.loc[msoa_principalDF_selection_2015['region'] == msoa]['value'])
for msoa in msoas:
pop_msoa = float(msoa_principalDF_selection_2015.loc[msoa_principalDF_selection_2015['region'] == msoa]['value'])
factor_msoas[msoa] = pop_msoa / tot_pop_lad #calculate fator
list_with_all_vals = []
# READ csv file
gp_file = pd.read_csv(file_path)
pop_LADs_2015 = gp_file.loc[gp_file['year'] == 2015]
for index, row_lad in gp_file.iterrows():
lad = row_lad['region']
try:
corresponding_msoas = LAD_MSOA_lu[lad]
except KeyError:
# No match for northern ireland
corresponding_msoas = [lad]
# Calculate population according to ONS 2015 #pop_LAD = row_lad['value']
pop_LAD_2015 = float(pop_LADs_2015.loc[gp_file['region'] == lad]['value']) #Base year pop
for msoa_name in corresponding_msoas:
try:
pop_ONS_scale_factor = factor_msoas[msoa_name]
except:
pop_ONS_scale_factor = 1 # If not mapped
pop_MSOA_ONS_scaled = pop_LAD_2015 * pop_ONS_scale_factor
new_row = {
'region': msoa_name,
"year": row_lad['year'],
"value": pop_MSOA_ONS_scaled,
"interval": row_lad['interval']}
list_with_all_vals.append(new_row)
msoaDF = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
file_path_MSOA_out = os.path.join(empty_folder_name, "{}_{}.csv".format(file_name[:-4], "MSOA"))
msoaDF.to_csv(file_path_MSOA_out, index=False)
wrote_out_pop = True
elif (filename_split[0] == "gva_per_head" and filename_split[1] == 'lad.csv'):
file_path = os.path.join(path_to_folder, folder_name, file_name)
print("Change file: " + str(file_path))
gp_file = pd.read_csv(file_path)
# Add new column
gp_file['value'] = 1000
# Replace future pop with 2015 pop
gp_file_selection_2015 = gp_file.loc[gp_file['year'] == 2015] #Data of 2015
list_with_all_vals = []
for year in range(base_yr, end_yr + 1):
gp_file_selection_yr = gp_file_selection_2015
gp_file_selection_yr['year'] = year
list_with_all_vals += gp_file_selection_yr.values.tolist()
new_dataframe = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
# Save as file
file_path_out = os.path.join(empty_folder_name, file_name)
new_dataframe.to_csv(file_path_out, index=False) #Index prevents writing index rows
# -----------------------------------------
# MSOA GVA calculations
# -----------------------------------------
if MSOA_calculations:
lads = list(gp_file.loc[gp_file['year'] == 2015]['region'])
list_with_all_vals = []
for lad in lads:
try:
corresponding_msoas = LAD_MSOA_lu[lad]
except KeyError:
corresponding_msoas = lad # No match for northern ireland
rows_msoa = gp_file.loc[gp_file['region'] == lad].values
for row_msoa in rows_msoa:
for msoa_name in corresponding_msoas:
#row_msoa[0] = msoa_name
new_row = {
"region": msoa_name,
"year": row_msoa[1],
"value": row_msoa[2],
"interval": row_msoa[3]}
list_with_all_vals.append(new_row)
#msoaDF = msoaDF.append(new_row, ignore_index=True)
# Convert list to dataframe
msoaDF = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
file_path_MSOA_out = os.path.join(empty_folder_name, "{}_{}.csv".format(file_name[:-4], "MSOA"))
msoaDF.to_csv(file_path_MSOA_out, index=False)
wroute_out_GVA = True
else:
pass
if wrote_out_pop == True and wroute_out_GVA == True:
break
print("... finished generating CONSTANT scenario")'''
# ---------------------------------------------------------------------------------------------------
# Add interval and create individual GVA data for selected sectors
# ---------------------------------------------------------------------------------------------------
columns = ['timestep', 'sectors', geography_name]
# Get all folders with scenario run results (name of folder is scenario)
all_csv_folders_walk = os.walk(path_to_folder)
for root, dirnames, filenames in all_csv_folders_walk:
all_csv_folders = dirnames
break
for folder_name in all_csv_folders:
all_files = os.listdir(os.path.join(path_to_folder, folder_name))
if (scenarios_to_generate == []) or (folder_name in scenarios_to_generate):
print("folder name: " + str(folder_name), flush=True)
for file_name in all_files:
filename_split = file_name.split("__")
var_name = filename_split[0]
if (var_name == "gva_per_head" and filename_split[1] == 'lad_sector.csv') or (
var_name == "population" and filename_split[1] == 'lad.csv') or (
var_name == "gva_per_head" and filename_split[1] == 'lad.csv'):
try:
file_path = os.path.join(path_to_folder, folder_name, file_name)
print("file_path " + str(file_path))
gp_file = pd.read_csv(file_path)
gp_file['year'] = gp_file['year'].astype(int)
                        # Drop all rows with NaN 'value' entries
gp_file = gp_file[np.isfinite(gp_file['value'])]
# Select all entries with matching years
gp_file = gp_file.loc[gp_file['year'].isin(range(base_yr, end_yr + 1))]
# Rename columns
gp_file = gp_file.rename(index=str, columns={"year": "timestep"})
gp_file = gp_file.rename(index=str, columns={"region": geography_name})
gp_file = gp_file.rename(index=str, columns={"value": var_name})
gp_file.to_csv(file_path, index=False)
# ---
# MSOA pop calculation
# ----
if MSOA_calculations:
if (filename_split[0] == "population" and filename_split[1] == 'lad.csv'):
# Calculate relative pop percentage of ONS scenarios
msoa_principalDF = pd.read_csv(path_MSOA_baseline)
msoa_principalDF_selection_2015 = msoa_principalDF.loc[msoa_principalDF['year'] == 2015]
# LADs and calculate factor per MSOA
factor_msoas = {}
for lad, msoas in LAD_MSOA_lu.items():
tot_pop_lad = 0
for msoa in msoas:
tot_pop_lad += float(msoa_principalDF_selection_2015.loc[msoa_principalDF_selection_2015['region'] == msoa]['value'])
for msoa in msoas:
pop_msoa = float(msoa_principalDF_selection_2015.loc[msoa_principalDF_selection_2015['region'] == msoa]['value'])
factor_msoas[msoa] = pop_msoa / tot_pop_lad #calculate fator
list_with_all_vals = []
gp_file = pd.read_csv(file_path)
for index, row_lad in gp_file.iterrows():
lad = row_lad['region']
try:
corresponding_msoas = LAD_MSOA_lu[lad]
except KeyError:
corresponding_msoas = [lad] # No match for northern ireland
# Calculate population according to ONS 2015
pop_LAD = row_lad['value']
for msoa_name in corresponding_msoas:
try:
pop_ONS_scale_factor = factor_msoas[msoa_name]
except:
pop_ONS_scale_factor = 1 # If not mapped
pop_MSOA_ONS_scaled = pop_LAD * pop_ONS_scale_factor
new_row = {
'region': msoa_name,
"timestep": row_lad['year'],
"value": pop_MSOA_ONS_scaled}
list_with_all_vals.append(new_row)
msoaDF = pd.DataFrame(list_with_all_vals, columns=gp_file.columns)
file_path_MSOA_out = os.path.join(path_to_folder, folder_name, "{}_{}.csv".format(file_name[:-4], "MSOA"))
msoaDF.to_csv(file_path_MSOA_out, index=False)
except:
print("... error in preparing data")
pass
else:
pass
# ----------------------------------------------------------
# Script to generate sectors file
# ----------------------------------------------------------
if (filename_split[0] == "gva_per_head" and filename_split[1] == 'lad_sector.csv'):
#try:
file_path = os.path.join(path_to_folder, folder_name, file_name)
df = pd.read_csv(file_path)
df = df.rename(index=str, columns={"year": "timestep"})
df = df.rename(index=str, columns={"economic_sector__gor": "sectors"})
df = df.rename(index=str, columns={"gva_per_head": "gva_per_sector"})
# Drop columns
try:
df = df.drop('interval', 1)
except:
pass
# Select all entries with matching sectors
df = df.loc[df['sectors'].isin(sectors_to_generate)]
# Select all entries with matching years
df = df.loc[df['timestep'].isin(range(base_yr, end_yr + 1))]
# Write to csv
file_path_sectors = os.path.join(path_to_folder, folder_name, "gva_per_head__lad_sectors.csv")
df.to_csv(file_path_sectors, index=False)
#except:
# pass #Error
# -----------------------------------------
# MSOA GVA calculations
# -----------------------------------------
if MSOA_calculations:
if (filename_split[0] == "gva_per_head" and filename_split[1] == 'lad.csv'):
try:
list_with_all_vals = []
file_path = os.path.join(path_to_folder, folder_name, file_name)
lads = list(gp_file.loc[gp_file['year'] == 2015]['region'])
for lad in lads:
try:
corresponding_msoas = LAD_MSOA_lu[lad]
except KeyError:
# No match for northern ireland
corresponding_msoas = [lad]
rows_msoa = gp_file.loc[gp_file['region'] == lad]
for index, row_msoa in rows_msoa.iterrows():
for msoa_name in corresponding_msoas:
new_row = {
"region": msoa_name,
"timestep": row_msoa['year'],
"value": row_msoa['value']}
list_with_all_vals.append(new_row)
# Convert list to dataframe
msoaDF = | pd.DataFrame(list_with_all_vals, columns=gp_file.columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from hot_topics import Clusters
from hot_topics.helpers import STOP_WORDS, clusterTokenizer
from flask import Flask
app = Flask(__name__)
def load_data():
df = | pd.read_csv("articles.csv") | pandas.read_csv |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tested environment
# - Python 2.7.6
# - pip 1.5.4
# - pandas 0.15.2
# - scikit-learn 0.15.2
# - numpy 1.9.1
# - scipy 0.13.3
# Load modules
import re
import math
import pandas as pd
from sklearn import linear_model
##################################
# Helper functions for munging the data
##################################
# Extract the day of the week from the date and return either '平日' (weekday) or '土日祝休' (weekend/holiday)
def extract_youbi(s):
pat = re.compile('\d+/\d+\(([^\)]+)\)')
youbi = pat.match(s.decode("utf-8")).group(1).encode("utf-8")
if '祝' in youbi or '休' in youbi or '日' in youbi or '土' in youbi:
return '土日祝休'
else:
return '平日'
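# Illustrative examples (assuming the date column is formatted like "MM/DD(曜日)"):
#   extract_youbi("03/05(土)") -> '土日祝休'
#   extract_youbi("03/09(水)") -> '平日'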
# Extract only the round ('...節') prefix from the matchday string
def extract_turn(s):
pat = re.compile(u'.*?節')
return pat.match(s.decode("utf-8")).group().encode("utf-8")
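# Illustrative example (assuming the matchday column starts with something like "第1節"):
#   extract_turn("第1節第1日") -> '第1節'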
########################
# Load the data
########################
# Load the training data
df_train_2010 = pd.read_csv("data/train-2010.csv", header = None)
df_train_2011 = pd.read_csv("data/train-2011.csv", header = None)
df_train = pd.read_csv("data/train.csv", header = None)
df_train_2010.columns = df_train_2011.columns = df_train.columns = ["ID", "passenger", "year", "league", "turn", "date", "time", "home", "away", "stadium", "TV"]
# Merge the training data
df_train = pd.concat([df_train_2010, df_train_2011, df_train])
# Load the test (prediction) data
df_test = pd.read_csv("data/test.csv", header = None)
df_test.columns = ["ID", "year", "league", "turn", "date", "time", "home", "away", "stadium", "TV"]
# Concatenate the training and test data
len_train = len(df_train)
df = pd.concat([df_train, df_test])
##################################
# Munge the data for analysis
##################################
stadium = | pd.get_dummies(df.stadium) | pandas.get_dummies |
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
# import seaborn as sns
# from scipy import stats
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from surprise import Reader, Dataset, SVD, evaluate
import copy
path = '../the-movies-dataset/'
md = pd.read_csv(path + 'final_metadata.csv')
links = pd.read_csv(path + 'final_links.csv')
del md['useless']
del links['useless']
credits = pd.read_csv(path + 'credits.csv')
keywords = pd.read_csv(path + 'keywords.csv')
md['genres'] = md['genres'].fillna('[]').apply(literal_eval).apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
# md['year'] = pd.to_datetime(md['release_date'], errors='coerce').apply(lambda x: [str(x).split('-')[0]] if x != np.nan else [])
# md['year'] = md['year'].fillna('[]').apply(lambda x: [str(int(x))] if isinstance(x, int) or isinstance(x, float) or isinstance(x, str) else [])
md['year'] = md['year'].fillna('[]').apply(literal_eval)
md['popularity'] = md['popularity'].fillna('[]').apply(lambda x: [str(int(x))] if isinstance(x, float) or isinstance(x, int) else [])
links = links[links['tmdbId'].notnull()]['tmdbId'].astype('int')
#md = md.drop([19730, 29503, 35587])
md['id'] = md['id'].astype('int')
smd = md[md['id'].isin(links)]
smd['tagline'] = smd['tagline'].fillna('')
smd['description'] = smd['overview'] + smd['tagline']
smd['description'] = smd['description'].fillna('')
keywords['id'] = keywords['id'].astype('int')
credits['id'] = credits['id'].astype('int')
md['id'] = md['id'].astype('int')
md = md.merge(credits, on='id')
md = md.merge(keywords, on='id')
smd = md[md['id'].isin(links)]
smd['cast'] = smd['cast'].apply(literal_eval)
smd['crew'] = smd['crew'].apply(literal_eval)
smd['keywords'] = smd['keywords'].apply(literal_eval)
smd['cast_size'] = smd['cast'].apply(lambda x: len(x))
smd['crew_size'] = smd['crew'].apply(lambda x: len(x))
def get_director(x):
for i in x:
if i['job'] == 'Director':
return i['name']
return np.nan
indices = pd.Series(smd.index, index=smd['title'])
smd['keywords'] = smd['keywords'].apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
smd['cast'] = smd['cast'].apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
smd['cast'] = smd['cast'].apply(lambda x: x[:3] if len(x) >=3 else x)
smd['cast'] = smd['cast'].apply(lambda x: [str.lower(i.replace(" ", "")) for i in x])
smd['director'] = smd['crew'].apply(get_director)
smd['director'] = smd['director'].astype('str').apply(lambda x: str.lower(x.replace(" ", "")))
smd['director'] = smd['director'].apply(lambda x: [x,x,x])
s = smd.apply(lambda x: pd.Series(x['keywords']),axis=1).stack().reset_index(level=1, drop=True)
s.name = 'keyword'
s = s.value_counts()
s = s[s > 1]
stemmer = SnowballStemmer('english')
stemmer.stem('dogs')
def filter_keywords(x):
words = []
for i in x:
if i in s:
words.append(i)
return words
smd['keywords'] = smd['keywords'].apply(filter_keywords)
smd['keywords'] = smd['keywords'].apply(lambda x: [stemmer.stem(i) for i in x])
smd['keywords'] = smd['keywords'].apply(lambda x: [str.lower(i.replace(" ", "")) for i in x])
smd['soup'] = smd['keywords'] + smd['cast'] + smd['director'] + smd['genres'] + smd['popularity'] + smd['year']
smd['soup'] = smd['soup'].apply(lambda x: ' '.join(x))
count = CountVectorizer(analyzer='word', ngram_range=(1, 2), min_df=0, stop_words='english')
count_matrix = count.fit_transform(smd['soup'])
cosine_sim = cosine_similarity(count_matrix, count_matrix)
smd = smd.reset_index()
titles = smd['title']
indices = | pd.Series(smd.index, index=smd['title']) | pandas.Series |
# ____ ____
# / /\/ /
# /___/ \ / Copyright (c) 2021, Xilinx®.
# \ \ \/ Author: <NAME> <<EMAIL>>
# \ \
# / /
# /___/ /\
# \ \ / \
# \___\/\___\
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from launch import LaunchDescription
import bt2
import sys
import datetime
import os
from wasabi import color
from typing import List, Optional, Tuple, Union
import pandas as pd
import numpy as np
import pprint
from bokeh.plotting.figure import figure, Figure
from bokeh.plotting import output_notebook
from bokeh.io import show
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, DatetimeTickFormatter, PrintfTickFormatter, Legend, Segment
from bokeh.models.annotations import Label
# color("{:02x}".format(x), fg=16, bg="green")
# debug = True # debug flag, set to True if desired
def get_change(first, second):
"""
Get change in percentage between two values
"""
if first == second:
return 0
try:
return (abs(first - second) / second) * 100.0
except ZeroDivisionError:
return float("inf")
def add_durations_to_figure(
figure: Figure,
segment_type: str,
durations: List[Union[Tuple[datetime.datetime, datetime.datetime]]],
color: str,
line_width: int = 60,
legend_label: Optional[str] = None,
) -> None:
for duration in durations:
duration_begin, duration_end, _ = duration
base_kwargs = dict()
if legend_label:
base_kwargs['legend_label'] = legend_label
figure.line(
x=[duration_begin, duration_end],
y=[segment_type, segment_type],
color=color,
line_width=line_width,
**base_kwargs,
)
def add_markers_to_figure(
figure: Figure,
segment_type: str,
times: List[datetime.datetime],
color: str,
line_width: int = 60,
legend_label: Optional[str] = None,
size: int = 30,
marker_type: str = 'diamond',
) -> None:
for time in times:
base_kwargs = dict()
if legend_label:
base_kwargs['legend_label'] = legend_label
if marker_type == 'diamond':
figure.diamond(
x=[time],
y=[segment_type],
fill_color=color,
line_color=color,
size=size,
**base_kwargs,
)
elif marker_type == 'plus':
figure.plus(
x=[time],
y=[segment_type],
fill_color=color,
line_color=color,
size=size,
**base_kwargs,
)
else:
assert False, 'invalid marker_type value'
def msgsets_from_trace(tracename):
global target_chain
# Create a trace collection message iterator from the first command-line
# argument.
msg_it = bt2.TraceCollectionMessageIterator(tracename)
# Iterate the trace messages and pick ros2 ones
image_pipeline_msgs = []
for msg in msg_it:
# `bt2._EventMessageConst` is the Python type of an event message.
if type(msg) is bt2._EventMessageConst:
# An event message holds a trace event.
event = msg.event
# Only check `sched_switch` events.
if ("ros2" in event.name):
image_pipeline_msgs.append(msg)
# Form sets with each pipeline
image_pipeline_msg_sets = []
new_set = [] # used to track new complete sets
chain_index = 0 # track where in the chain we are so far
vpid_chain = -1 # used to track a set and differentiate from other callbacks
# NOTE: NOT CODED FOR MULTIPLE NODES RUNNING CONCURRENTLY
# this classification is going to miss the initial matches because
# "ros2:callback_start" will not be associated with the target chain and it won't stop
# being considered until a "ros2:callback_end" of that particular process is seen
for index in range(len(image_pipeline_msgs)):
# first one
if chain_index == 0 and image_pipeline_msgs[index].event.name == target_chain[chain_index]:
new_set.append(image_pipeline_msgs[index])
vpid_chain = image_pipeline_msgs[index].event.common_context_field.get("vpid")
chain_index += 1
# print(color("Found: " + str(image_pipeline_msgs[index].event.name) + " - " + str([x.event.name for x in new_set]), fg="blue"))
# last one
elif image_pipeline_msgs[index].event.name == target_chain[chain_index] and target_chain[chain_index] == target_chain[-1] and \
new_set[-1].event.name == target_chain[-2] and \
image_pipeline_msgs[index].event.common_context_field.get("vpid") == vpid_chain:
new_set.append(image_pipeline_msgs[index])
image_pipeline_msg_sets.append(new_set)
# print(color("Found: " + str(image_pipeline_msgs[index].event.name) + " - " + str([x.event.name for x in new_set]), fg="blue"))
chain_index = 0 # restart
new_set = [] # restart
# match
elif image_pipeline_msgs[index].event.name == target_chain[chain_index] and \
image_pipeline_msgs[index].event.common_context_field.get("vpid") == vpid_chain:
new_set.append(image_pipeline_msgs[index])
chain_index += 1
# print(color("Found: " + str(image_pipeline_msgs[index].event.name), fg="green"))
# altered order
elif image_pipeline_msgs[index].event.name in target_chain and \
image_pipeline_msgs[index].event.common_context_field.get("vpid") == vpid_chain:
new_set.append(image_pipeline_msgs[index])
# print(color("Altered order: " + str([x.event.name for x in new_set]) + ", restarting", fg="red"))
chain_index = 0 # restart
new_set = [] # restart
return image_pipeline_msg_sets
def msgsets_from_trace_concurrent(tracename):
global target_chain
# NOTE: considered chains of "ros2:rclcpp_publish" roughly
# Create a trace collection message iterator from the first command-line
# argument.
msg_it = bt2.TraceCollectionMessageIterator(tracename)
# Iterate the trace messages and pick ros2 ones
image_pipeline_msgs = []
for msg in msg_it:
# `bt2._EventMessageConst` is the Python type of an event message.
if type(msg) is bt2._EventMessageConst:
# An event message holds a trace event.
event = msg.event
# Only check `sched_switch` events.
if ("ros2" in event.name):
image_pipeline_msgs.append(msg)
# Form sets with each pipeline
image_pipeline_msg_sets = []
    candidates = {} # dict of sets (vtid as key) being considered as candidates to be complete
# NOTE:
# - vpid remains the same for all Components in an executor, even if multithreaded
# - vtid changes for each component in a multithreaded executor
for trace in image_pipeline_msgs:
vtid = trace.event.common_context_field.get("vtid")
if trace.event.name == target_chain[0]:
if (vtid in candidates) and (candidates[vtid][-1].event.name == target_chain[-1]): # account for chained traces, use "ros2:callback_end"
# print(color("Continuing: " + str(trace.event.name), fg="green"))
candidates[vtid].append(trace)
elif vtid in candidates:
# print(color("Already a set, re-starting: " + str(trace.event.name) + " - " \
# + str([x.event.name for x in candidates[vtid]]) , fg="yellow"))
candidates[vtid] = [trace] # already a set existing (pop and) re-start
else:
candidates[vtid] = [trace] # new set
# print(color("New: " + str(trace.event.name) + " - " + \
# str([x.event.name for x in candidates[vtid]]), fg="blue"))
elif (trace.event.name in target_chain) and (vtid in candidates):
if len(candidates[vtid]) >= 9 and (trace.event.name in target_chain[9:]):
trace_index = target_chain[9:].index(trace.event.name) + 9
expected_index = target_chain[9:].index(candidates[vtid][-1].event.name) + 1 + 9
elif len(candidates[vtid]) >= 9:
# print(color("Skipping: " + str(trace.event.name), fg="yellow"))
continue # skip
else:
trace_index = target_chain.index(trace.event.name)
expected_index = target_chain.index(candidates[vtid][-1].event.name) + 1
# Account for chains of callbacks
if trace.event.name == target_chain[-1] and candidates[vtid][-1].event.name == target_chain[0]:
if len(candidates[vtid]) > 1:
candidates[vtid] = candidates[vtid][:-1] # pop last start and continue looking
# print(color("Chain of callbacks, popping: " + str(trace.event.name) , fg="yellow"))
else:
candidates.pop(vtid)
# print(color("Chain of callbacks while starting, popping: " + str(trace.event.name) , fg="yellow"))
elif trace_index == expected_index:
candidates[vtid].append(trace)
# print(color("Found: " + str(trace.event.name), fg="green"))
if trace.event.name == target_chain[-1] and candidates[vtid][-2].event.name == target_chain[-2] \
and len(candidates[vtid]) == len(target_chain): # last one
image_pipeline_msg_sets.append(candidates[vtid])
# print(color("complete set!", fg="pink"))
candidates.pop(vtid)
else:
if trace.event.name == "ros2:rclcpp_publish" or \
trace.event.name == "ros2:rcl_publish" or \
trace.event.name == "ros2:rmw_publish":
# print(color("Potential chain of publish: " + str(trace.event.name) + ", skipping" , fg="yellow"))
pass
else:
candidates[vtid].append(trace)
# print(color("Altered order: " + str([x.event.name for x in candidates[vtid]]) + ", discarding", fg="red"))
candidates.pop(vtid)
else:
# print(color("Skipped: " + str(trace.event.name), fg="grey"))
pass
return image_pipeline_msg_sets
def barplot_all(image_pipeline_msg_sets, title="Barplot"):
global target_chain
global target_chain_dissambiguous
image_pipeline_msg_sets_ns = []
for set_index in range(len(image_pipeline_msg_sets)):
aux_set = []
target_chain_ns = []
for msg_index in range(len(image_pipeline_msg_sets[set_index])):
target_chain_ns.append(image_pipeline_msg_sets[set_index][msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
for msg_index in range(len(image_pipeline_msg_sets[set_index])):
aux_set.append((target_chain_ns[msg_index] - init_ns)/1e6)
image_pipeline_msg_sets_ns.append(aux_set)
df = pd.DataFrame(image_pipeline_msg_sets_ns)
df.columns = target_chain_dissambiguous
import plotly.express as px
# pd.set_option("display.max_rows", None, "display.max_columns", None)
# print(df)
fig = px.box(
df,
points="all",
template="plotly_white",
title=title,
)
fig.update_xaxes(title_text = "Trace event")
fig.update_yaxes(title_text = "Milliseconds")
fig.show()
def traces(msg_set):
global target_chain_colors_fg_bokeh
global segment_types
global target_chain_marker
global target_chain
global target_chain_layer
fig = figure(
title='Image pipeline tracing',
x_axis_label=f'Milliseconds',
y_range=segment_types,
plot_width=2000,
plot_height=600,
)
fig.title.align = 'center'
fig.title.text_font_size = '20px'
# fig.xaxis[0].formatter = DatetimeTickFormatter(milliseconds = ['%3Nms'])
fig.xaxis[0].formatter = PrintfTickFormatter(format="%f ms")
fig.xaxis[0].ticker.desired_num_ticks = 20
fig.xaxis[0].axis_label_text_font_size = '30px'
fig.yaxis[0].major_label_text_font_size = '25px'
target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
print("1")
# draw durations
## rclcpp callbacks - rectify
callback_start = (target_chain_ns[0] - init_ns)/1e6
callback_end = (target_chain_ns[8] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[0],
[(callback_start, callback_start + duration, duration)],
'lightgray'
)
## rclcpp callbacks - resize
callback_start = (target_chain_ns[9] - init_ns)/1e6
callback_end = (target_chain_ns[17] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[0],
[(callback_start, callback_start + duration, duration)],
'lightgray'
)
## rectify callback
callback_start = (target_chain_ns[1] - init_ns)/1e6
callback_end = (target_chain_ns[7] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[1],
[(callback_start, callback_start + duration, duration)],
'whitesmoke'
)
## rectify op
callback_start = (target_chain_ns[2] - init_ns)/1e6
callback_end = (target_chain_ns[3] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[1],
[(callback_start, callback_start + duration, duration)],
'seashell'
)
## resize callback
callback_start = (target_chain_ns[10] - init_ns)/1e6
callback_end = (target_chain_ns[16] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[1],
[(callback_start, callback_start + duration, duration)],
'whitesmoke'
)
## resize op
callback_start = (target_chain_ns[11] - init_ns)/1e6
callback_end = (target_chain_ns[12] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[1],
[(callback_start, callback_start + duration, duration)],
'seashell'
)
print("2")
for msg_index in range(len(msg_set)):
# add_markers_to_figure(fig, msg_set[msg_index].event.name, [(target_chain_ns[msg_index] - init_ns)/1e6], 'blue', marker_type='plus', legend_label='timing')
print("marker ms: " + str((target_chain_ns[msg_index] - init_ns)/1e6))
add_markers_to_figure(
fig,
target_chain_layer[msg_index],
[(target_chain_ns[msg_index] - init_ns)/1e6],
target_chain_colors_fg_bokeh[msg_index],
marker_type=target_chain_marker[msg_index],
# legend_label=msg_set[msg_index].event.name,
legend_label=target_chain_dissambiguous[msg_index],
size=10,
)
if "image_proc_resize_init" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=0,
y_offset=-90,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "image_proc_rectify_init" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=0,
y_offset=-100,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "image_proc_rectify_fini" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=-60,
y_offset=-50,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "image_proc_rectify_cb_fini" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=-30,
y_offset=-50,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "callback_start" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=-30,
y_offset=-90,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "image_proc_resize_fini" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=20,
y_offset=-50,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
else:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=-30,
y_offset=-30,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
fig.add_layout(label)
# hack legend to the right
fig.legend.location = "right"
new_legend = fig.legend[0]
fig.legend[0] = None
fig.add_layout(new_legend, 'right')
show(fig)
def barchart_data(image_pipeline_msg_sets):
"""Converts a tracing message list into its corresponding
relative (to the previous tracepoint) latency list in
millisecond units.
Args:
image_pipeline_msg_sets ([type]): [description]
Returns:
list: list of relative latencies, in ms
"""
image_pipeline_msg_sets_ns = []
for set_index in range(len(image_pipeline_msg_sets)):
aux_set = []
target_chain_ns = []
for msg_index in range(len(image_pipeline_msg_sets[set_index])):
target_chain_ns.append(image_pipeline_msg_sets[set_index][msg_index].default_clock_snapshot.ns_from_origin)
for msg_index in range(len(image_pipeline_msg_sets[set_index])):
if msg_index == 0:
previous = target_chain_ns[0]
else:
previous = target_chain_ns[msg_index - 1]
aux_set.append((target_chain_ns[msg_index] - previous)/1e6)
image_pipeline_msg_sets_ns.append(aux_set)
return image_pipeline_msg_sets_ns
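# Illustrative example (not part of the original script): for a single set whose tracepoints
# occur at 0 ns, 2e6 ns and 5e6 ns, barchart_data returns [[0.0, 2.0, 3.0]]; each entry is the
# latency relative to the previous tracepoint, in milliseconds.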
def print_timeline(image_pipeline_msg_sets):
global target_chain
global target_chain_colors_fg
for msg_set in image_pipeline_msg_sets:
if len(msg_set) != len(target_chain):
print(color("Not a complete set: " + str([x.event.name for x in msg_set]), fg="red"))
pass
else:
target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
fixed_target_chain_ns = [init_ns] + target_chain_ns
# stringout = color("raw image → " + msg_set[0].event.name + " → ")
stringout = color("raw image ")
for msg_index in range(len(msg_set)):
stringout +=" → " + color(msg_set[msg_index].event.name + \
" ({} ms) ".format((fixed_target_chain_ns[msg_index + 1] - fixed_target_chain_ns[msg_index])/1e6),
fg=target_chain_colors_fg[msg_index], bg="black")
# stringout += " → " + msg_set[msg_index].event.name + \
# " ({} ms) ".format((fixed_target_chain_ns[msg_index + 1] - fixed_target_chain_ns[msg_index])/1e6)
stringout += color("total " + \
" ({} ms) ".format((target_chain_ns[-1] - target_chain_ns[0])/1e6), fg="black", bg="white")
print(stringout)
def rms(list):
return np.sqrt(np.mean(np.array(list)**2))
def mean(list):
return np.mean(np.array(list))
def max(list):
return np.max(np.array(list))
def min(list):
return np.min(np.array(list))
def rms_sets(image_pipeline_msg_sets, indices=None):
"""
Root-Mean-Square (RMS) (in the units provided) for a
given number of time trace sets.
NOTE: last value of the lists should not include the total
:param: image_pipeline_msg_sets, list of lists, each containing the time traces
:param: indices, list of indices to consider on each set which will be summed
for rms. By default, sum of all values on each set.
"""
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return rms(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return rms(total_in_sets)
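# Illustrative example (not part of the original script): rms_sets([[1.0, 2.0], [3.0, 4.0]])
# sums each set (3.0 and 7.0) and returns sqrt((3.0**2 + 7.0**2) / 2) ~= 5.385, while
# rms_sets([[1.0, 2.0], [3.0, 4.0]], indices=[0]) uses only index 0 of each set (1.0 and 3.0).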
def mean_sets(image_pipeline_msg_sets, indices=None):
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return mean(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return mean(total_in_sets)
def max_sets(image_pipeline_msg_sets, indices=None):
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return max(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return max(total_in_sets)
def min_sets(image_pipeline_msg_sets, indices=None):
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return min(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return min(total_in_sets)
def print_timeline_average(image_pipeline_msg_sets):
"""
    Doing averages may lead to negative numbers while subtracting the previous average.
This is only useful to get an intuition of the totals.
"""
global target_chain
global target_chain_colors_fg
image_pipeline_msg_sets_ns = []
for msg_set in image_pipeline_msg_sets:
if len(msg_set) != len(target_chain):
print(color("Not a complete set: " + str([x.event.name for x in msg_set]), fg="red"))
pass
else:
target_chain_ns = []
final_target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
fixed_target_chain_ns = [init_ns] + target_chain_ns
for msg_index in range(len(msg_set)):
final_target_chain_ns.append((fixed_target_chain_ns[msg_index + 1] - fixed_target_chain_ns[msg_index]))
final_target_chain_ns.append((fixed_target_chain_ns[-1] - fixed_target_chain_ns[0])) # total
image_pipeline_msg_sets_ns.append(final_target_chain_ns)
image_pipeline_msg_ns_average = [sum(x) / len(x) for x in zip(*image_pipeline_msg_sets_ns)]
# print(image_pipeline_msg_ns_average)
stringout = color("raw image ")
for msg_index in range(len(image_pipeline_msg_ns_average[:-1])):
stringout +=" → " + color(image_pipeline_msg_sets[0][msg_index].event.name + \
" ({} ms) ".format((image_pipeline_msg_ns_average[msg_index + 1] - image_pipeline_msg_ns_average[msg_index])/1e6),
fg=target_chain_colors_fg[msg_index], bg="black")
stringout += color("total " + \
" ({} ms) ".format((image_pipeline_msg_ns_average[-1] - image_pipeline_msg_ns_average[0])/1e6), fg="black", bg="white")
print(stringout)
def statistics(image_pipeline_msg_sets_ms, verbose=False):
global target_chain_dissambiguous
mean_ = mean_sets(image_pipeline_msg_sets_ms)
rms_ = rms_sets(image_pipeline_msg_sets_ms)
min_ = min_sets(image_pipeline_msg_sets_ms)
max_ = max_sets(image_pipeline_msg_sets_ms)
mean_accelerators = mean_sets(image_pipeline_msg_sets_ms,
[
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_rectify_fini"),
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_resize_fini"),
]
)
rms_accelerators = rms_sets(image_pipeline_msg_sets_ms,
[
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_rectify_fini"),
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_resize_fini"),
]
)
max_accelerators = max_sets(image_pipeline_msg_sets_ms,
[
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_rectify_fini"),
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_resize_fini"),
]
)
min_accelerators = min_sets(image_pipeline_msg_sets_ms,
[
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_rectify_fini"),
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_resize_fini"),
]
)
if verbose:
print(color("mean: " + str(mean_), fg="yellow"))
print("rms: " + str(rms_))
print("min: " + str(min_))
print(color("max: " + str(max_), fg="red"))
print(color("mean accelerators: " + str(mean_accelerators), fg="yellow"))
print("rms accelerators: " + str(rms_accelerators))
print("min accelerators: " + str(min_accelerators))
print(color("max accelerators: " + str(max_accelerators), fg="red"))
return [mean_accelerators, rms_accelerators, max_accelerators, min_accelerators, mean_, rms_, max_, min_]
def table(list_sets, list_sets_names):
"""
Creates a markdown table from a list of sets
NOTE: assumes base is always the first set in list_sets, which
is then used to calculate % of change.
"""
list_statistics = []
# generate statistics
for sets in list_sets:
list_statistics.append(statistics(sets))
# Add name to each statistics list
for stat_list_index in range(len(list_statistics)):
list_statistics[stat_list_index].insert(0, list_sets_names[stat_list_index])
# add headers
list_statistics.insert(0, ["---", "---", "---", "---", "---", "---", "---", "---", "---",])
list_statistics.insert(0, [
" ", "Accel. Mean", "Accel. RMS",
"Accel. Max ", "Accel. Min", "Mean",
"RMS", "Max", "Min"])
baseline = list_statistics[2] # baseline for %
length_list = [len(row) for row in list_statistics]
column_width = max(length_list)
count = 0
for row in list_statistics:
row_str = " | "
if count == 2:
for element_index in range(len(row)):
if type(row[element_index]) != str:
if row[element_index] > baseline[element_index]:
row_str += "**{:.2f}** ms".format(row[element_index]) + " (:small_red_triangle_down: `" \
+ "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%) | "
else:
row_str += "**{:.2f}** ms".format(row[element_index]) + " (`" \
+ "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%) | "
else:
row_str += row[element_index] + " | "
else:
for element_index in range(len(row)):
if type(row[element_index]) != str:
if row[element_index] > baseline[element_index]:
row_str += "{:.2f} ms".format(row[element_index]) + " (:small_red_triangle_down: `" \
+ "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%) | "
else:
row_str += "{:.2f} ms".format(row[element_index]) + " (`" \
+ "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%) | "
else:
row_str += row[element_index] + " | "
count += 1
print(row_str)
# if count == 2:
# row = "|" + "|".join("**{:.2f}** ms".format(row[element_index]) + " (`"
# + "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%)"
# if type(row[element_index]) != str
# else row[element_index]
# for element_index in range(len(row))) + "|"
# else:
# row = "|" + "|".join("{:.2f} ms".format(row[element_index]) + " (`"
# + "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%)"
# if type(row[element_index]) != str else row[element_index]
# for element_index in range(len(row))) + "|"
# count += 1
# print(row)
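# Illustrative usage (not part of the original script; the variable names are assumptions):
# given per-set latency lists such as cpu_sets = barchart_data(msgsets_from_trace_concurrent(...))
# and fpga_sets built the same way, table([cpu_sets, fpga_sets], ["CPU", "FPGA"]) prints a
# Markdown table in which the first entry ("CPU") is the baseline for the percentage changes.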
def generate_launch_description():
return LaunchDescription()
##############################
##############################
# targeted chain of messages for tracing
target_chain = [
"ros2:callback_start",
"ros2_image_pipeline:image_proc_rectify_cb_init",
"ros2_image_pipeline:image_proc_rectify_init",
"ros2_image_pipeline:image_proc_rectify_fini",
"ros2:rclcpp_publish",
"ros2:rcl_publish",
"ros2:rmw_publish",
"ros2_image_pipeline:image_proc_rectify_cb_fini",
"ros2:callback_end",
"ros2:callback_start",
"ros2_image_pipeline:image_proc_resize_cb_init",
"ros2_image_pipeline:image_proc_resize_init",
"ros2_image_pipeline:image_proc_resize_fini",
"ros2:rclcpp_publish",
"ros2:rcl_publish",
"ros2:rmw_publish",
"ros2_image_pipeline:image_proc_resize_cb_fini",
"ros2:callback_end",
]
target_chain_dissambiguous = [
"ros2:callback_start",
"ros2_image_pipeline:image_proc_rectify_cb_init",
"ros2_image_pipeline:image_proc_rectify_init",
"ros2_image_pipeline:image_proc_rectify_fini",
"ros2:rclcpp_publish",
"ros2:rcl_publish",
"ros2:rmw_publish",
"ros2_image_pipeline:image_proc_rectify_cb_fini",
"ros2:callback_end",
"ros2:callback_start (2)",
"ros2_image_pipeline:image_proc_resize_cb_init",
"ros2_image_pipeline:image_proc_resize_init",
"ros2_image_pipeline:image_proc_resize_fini",
"ros2:rclcpp_publish (2)",
"ros2:rcl_publish (2)",
"ros2:rmw_publish (2)",
"ros2_image_pipeline:image_proc_resize_cb_fini",
"ros2:callback_end (2)",
]
target_chain_colors_fg = [
"blue",
"yellow",
"red",
"red",
"blue",
"blue",
"blue",
"yellow",
"blue",
"blue",
"yellow",
"red",
"red",
"blue",
"blue",
"blue",
"yellow",
"blue",
]
# target_chain_colors_fg_bokeh = [
# "lightgray",
# "silver",
# "darkgray",
# "gray",
# "dimgray",
# "lightslategray",
# "slategray",
# "darkslategray",
# "black",
# "burlywood",
# "tan",
# "rosybrown",
# "sandybrown",
# "goldenrod",
# "darkgoldenrod",
# "peru",
# "chocolate",
# "saddlebrown",
# # "blue",
# # "blueviolet",
# # "brown",
# # "burlywood",
# # "cadetblue",
# # "chartreuse",
# # "chocolate",
# # "coral",
# # "cornflowerblue",
# ]
target_chain_colors_fg_bokeh = [
"lightsalmon",
"salmon",
"darksalmon",
"lightcoral",
"indianred",
"crimson",
"firebrick",
"darkred",
"red",
"lavender",
"thistle",
"plum",
"fuchsia",
"mediumorchid",
"mediumpurple",
"darkmagenta",
"indigo",
"mediumslateblue",
]
target_chain_layer = [
"rclcpp",
"userland",
"userland",
"userland",
"rclcpp",
"rcl",
"rmw",
"userland",
"rclcpp",
"rclcpp",
"userland",
"userland",
"userland",
"rclcpp",
"rcl",
"rmw",
"userland",
"rclcpp",
]
target_chain_label_layer = [ # associated with the layer
3,
4,
4,
4,
3,
2,
1,
4,
3,
3,
4,
4,
4,
3,
2,
1,
4,
3,
]
target_chain_marker = [
"diamond",
"plus",
"plus",
"plus",
"plus",
"plus",
"plus",
"plus",
"diamond",
"diamond",
"plus",
"plus",
"plus",
"plus",
"plus",
"plus",
"plus",
"diamond",
]
# For some reason it seems to be displayed in the reverse order on the Y axis
segment_types = [
"rmw",
"rcl",
"rclcpp",
"userland"
]
# # ####################
# # print timing pipeline
# # ####################
# # image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga")
# # # print(len(image_pipeline_msg_sets))
# # # print_timeline(image_pipeline_msg_sets) # all timelines
# # print_timeline([image_pipeline_msg_sets[-1]]) # timeline of last message
# # # print_timeline_average(image_pipeline_msg_sets) # timeline of averages, NOTE only totals are of interest
# target_chain = [
# "ros2:callback_start",
# "ros2_image_pipeline:image_proc_rectify_cb_init",
# "ros2_image_pipeline:image_proc_rectify_init",
# "ros2_image_pipeline:image_proc_rectify_fini",
# "ros2:rclcpp_publish",
# "ros2:rcl_publish",
# "ros2:rmw_publish",
# "ros2_image_pipeline:image_proc_rectify_cb_fini",
# "ros2:callback_end",
# ]
# target_chain_colors_fg = [
# "blue",
# "yellow",
# "red",
# "red",
# "blue",
# "blue",
# "blue",
# "yellow",
# "blue",
# ]
# # image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga_integrated")
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga_integrated_250_node")
# # print(len(image_pipeline_msg_sets))
# # print_timeline(image_pipeline_msg_sets) # all timelines
# # print_timeline([image_pipeline_msg_sets[-1]]) # timeline of last message
# print_timeline(image_pipeline_msg_sets[-10:]) # timeline of last 10 messages
# # print_timeline_average(image_pipeline_msg_sets) # timeline of averages, NOTE only totals are of interest
######################
# draw tracepoints
######################
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize")
# msg_set = image_pipeline_msg_sets[-1]
# traces(msg_set)
# ######################
# # draw barplot all data
# ######################
# # # NOTE: Discard first few
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize")
# barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline in CPU")
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga")
# barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline in FPGA")
# # image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_stress")
# # barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline in CPU and with stress")
# # image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga_stress")
# # barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline in FPGA and with stress")
# target_chain = [
# "ros2:callback_start",
# "ros2_image_pipeline:image_proc_rectify_cb_init",
# "ros2_image_pipeline:image_proc_rectify_init",
# "ros2_image_pipeline:image_proc_rectify_fini",
# "ros2:rclcpp_publish",
# "ros2:rcl_publish",
# "ros2:rmw_publish",
# "ros2_image_pipeline:image_proc_rectify_cb_fini",
# "ros2:callback_end",
# # "ros2:callback_start",
# # "ros2_image_pipeline:image_proc_resize_cb_init",
# # "ros2_image_pipeline:image_proc_resize_init",
# # "ros2_image_pipeline:image_proc_resize_fini",
# # "ros2:rclcpp_publish",
# # "ros2:rcl_publish",
# # "ros2:rmw_publish",
# # "ros2_image_pipeline:image_proc_resize_cb_fini",
# # "ros2:callback_end",
# ]
# target_chain_dissambiguous = target_chain
# target_chain_colors_fg = [
# "blue",
# "yellow",
# "red",
# "red",
# "blue",
# "blue",
# "blue",
# "yellow",
# "blue",
# # "blue",
# # "yellow",
# # "red",
# # "red",
# # "blue",
# # "blue",
# # "blue",
# # "yellow",
# # "blue",
# ]
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga_integrated")
# barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline integrated @ 250 MHz in FPGA")
# target_chain = [
# "ros2:callback_start", "ros2_image_pipeline:image_proc_resize_cb_init",
# "ros2_image_pipeline:image_proc_resize_init", "ros2_image_pipeline:image_proc_resize_fini",
# "ros2:rclcpp_publish", "ros2:rcl_publish", "ros2:rmw_publish",
# "ros2_image_pipeline:image_proc_resize_cb_fini", "ros2:callback_end",
# ]
# target_chain_dissambiguous = target_chain
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_test2")
# barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline, streams @ 250 MHz in FPGA")
# ######################
# # draw bar charts
# ######################
#///////////////////
# Data sources
#///////////////////
# # NOTE: Discard first few
discard_count = 10
image_pipeline_msg_sets_ms_cpu = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize")[discard_count:])
image_pipeline_msg_sets_ms_fpga = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga")[discard_count:])
# image_pipeline_msg_sets_ms_fpga_streamlined = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_streamlined")[discard_count:])
# image_pipeline_msg_sets_ms_fpga_streamlined_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_streamlined_xrt")[discard_count:])
image_pipeline_msg_sets_ms_cpu_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_stress")[discard_count:])
image_pipeline_msg_sets_ms_fpga_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_stress")[discard_count:])
target_chain = [
"ros2:callback_start", "ros2_image_pipeline:image_proc_rectify_cb_init",
"ros2_image_pipeline:image_proc_rectify_init", "ros2_image_pipeline:image_proc_rectify_fini",
"ros2:rclcpp_publish", "ros2:rcl_publish", "ros2:rmw_publish",
"ros2_image_pipeline:image_proc_rectify_cb_fini", "ros2:callback_end",
]
image_pipeline_msg_sets_ms_fpga_integrated = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated)):
image_pipeline_msg_sets_ms_fpga_integrated[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
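# (illustrative note) barchart_data returns one duration per tracepoint in the
# active target_chain. The "*_integrated" traces are measured against the 9-entry
# rectify chain, so 9 trailing zeros bring each row up to the 18 entries of the
# full rectify+resize chain; the "*_streamlined" traces further below instead get
# 9 leading zeros, since they only cover the resize half of the chain.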
image_pipeline_msg_sets_ms_fpga_integrated_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_xrt")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_xrt)):
image_pipeline_msg_sets_ms_fpga_integrated_xrt[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_streamlined = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_streamlined)):
image_pipeline_msg_sets_ms_fpga_integrated_streamlined[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined_xrt")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt)):
image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# stress
image_pipeline_msg_sets_ms_fpga_integrated_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_stress")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_stress)):
image_pipeline_msg_sets_ms_fpga_integrated_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_xrt_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_xrt_stress")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_xrt_stress)):
image_pipeline_msg_sets_ms_fpga_integrated_xrt_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_streamlined_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined_stress")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_streamlined_stress)):
# image_pipeline_msg_sets_ms_fpga_integrated_streamlined_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined_xrt_stress")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt_stress)):
image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
target_chain = [
"ros2:callback_start", "ros2_image_pipeline:image_proc_resize_cb_init",
"ros2_image_pipeline:image_proc_resize_init", "ros2_image_pipeline:image_proc_resize_fini",
"ros2:rclcpp_publish", "ros2:rcl_publish", "ros2:rmw_publish",
"ros2_image_pipeline:image_proc_resize_cb_fini", "ros2:callback_end",
]
image_pipeline_msg_sets_ms_fpga_streamlined = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_streamlined")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_streamlined)):
image_pipeline_msg_sets_ms_fpga_streamlined[i_set] = [0, 0, 0, 0, 0, 0, 0, 0, 0] + image_pipeline_msg_sets_ms_fpga_streamlined[i_set]
image_pipeline_msg_sets_ms_fpga_streamlined_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_streamlined_xrt")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_streamlined_xrt)):
image_pipeline_msg_sets_ms_fpga_streamlined_xrt[i_set] = [0, 0, 0, 0, 0, 0, 0, 0, 0] + image_pipeline_msg_sets_ms_fpga_streamlined_xrt[i_set]
# image_pipeline_msg_sets_ms_fpga_integrated = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated)):
# image_pipeline_msg_sets_ms_fpga_integrated[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_200 = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_200")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_200)):
# image_pipeline_msg_sets_ms_fpga_integrated_200[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_250 = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_250")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_250)):
# image_pipeline_msg_sets_ms_fpga_integrated_250[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_250_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_250_stress")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_250_stress)):
# image_pipeline_msg_sets_ms_fpga_integrated_250_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_250_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_250_xrt")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_250_xrt)):
# image_pipeline_msg_sets_ms_fpga_integrated_250_xrt[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_streamlined = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_streamlined)):
# image_pipeline_msg_sets_ms_fpga_streamlined[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_streamlined_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined_xrt")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_streamlined_xrt)):
# image_pipeline_msg_sets_ms_fpga_streamlined_xrt[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# #///////////////////
# # Markdown Table results
# #///////////////////
# table(
# [
# # full pipeline
# image_pipeline_msg_sets_ms_cpu,
# image_pipeline_msg_sets_ms_fpga,
# # # integrated
# image_pipeline_msg_sets_ms_fpga_integrated,
# # image_pipeline_msg_sets_ms_fpga_integrated_xrt,
# # streamlined
# image_pipeline_msg_sets_ms_fpga_streamlined,
# image_pipeline_msg_sets_ms_fpga_streamlined_xrt,
# # # integrated, streamlined
# # image_pipeline_msg_sets_ms_fpga_integrated_streamlined,
# # image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt,
# #
# # # full pipeline stress
# # image_pipeline_msg_sets_ms_cpu,
# # image_pipeline_msg_sets_ms_fpga,
# # # image_pipeline_msg_sets_ms_fpga_streamlined,
# # # image_pipeline_msg_sets_ms_fpga_streamlined_xrt,
# # # integrated stress
# # image_pipeline_msg_sets_ms_fpga_integrated,
# # image_pipeline_msg_sets_ms_fpga_integrated_xrt,
# # # integrated, streamlined stress
# # # image_pipeline_msg_sets_ms_fpga_integrated_streamlined,
# # image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt,
# ],
# [
# # full pipeline
# "CPU **baseline**",
# "FPGA @ 250 MHz",
# # # integrated
# "FPGA, integrated @ 250 MHz",
# # "FPGA, integrated, XRT @ 250 MHz",
# # streamlined
# "FPGA, streams (resize) @ 250 MHz",
# "FPGA, streams (resize), XRT @ 250 MHz",
# # # integrated, streamlined
# # "FPGA, integrated, streams @ 250 MHz",
# # "FPGA, integrated, streams, XRT @ 250 MHz",
# #
# # # full pipeline stress
# # "CPU **baseline**",
# # "FPGA @ 250 MHz",
# # # "FPGA, streams @ 250 MHz",
# # # "FPGA, streams, XRT @ 250 MHz",
# # # integrated stress
# # "FPGA, integrated @ 250 MHz",
# # "FPGA, integrated, XRT @ 250 MHz",
# # # integrated, streamlined stress
# # # "FPGA, integrated, streams @ 250 MHz",
# # "FPGA, integrated, streams, XRT @ 250 MHz",
# ]
# )
#///////////////////
# Plot, either averages or latest, etc
#///////////////////
# # plot latest values
# df_cpu = pd.DataFrame(image_pipeline_msg_sets_ms_cpu[-1:]) # pick the latest one
# df_fpga = pd.DataFrame(image_pipeline_msg_sets_ms_fpga[-1:]) # pick the latest one
# df = pd.concat([df_cpu, df_fpga], ignore_index=True)
# df.columns = target_chain_dissambiguous
# substrates = pd.DataFrame({'substrate': ["CPU","FPGA"]})
# df = df.join(substrates)
# plot averages
df_cpu_mean = pd.DataFrame(image_pipeline_msg_sets_ms_cpu).mean()
df_fpga_mean = pd.DataFrame(image_pipeline_msg_sets_ms_fpga).mean()
df_fpga_mean_streamlined = | pd.DataFrame(image_pipeline_msg_sets_ms_fpga_streamlined) | pandas.DataFrame |
# import all the required files i.e. numpy , pandas and math library
from graphlib.financialGraph import Data
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
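# NOTE (illustrative): most indicators below expect an OHLCV DataFrame, i.e. a
# pandas DataFrame with lower-case 'open', 'high', 'low', 'close' and 'volume'
# columns, ideally indexed by date (volume is only needed by the volume-based
# indicators). A minimal sketch of such an input (numbers made up purely for
# illustration):
#
#   ohlcv = pd.DataFrame(
#       {"open": [10.0, 10.2], "high": [10.5, 10.6],
#        "low": [9.8, 10.0], "close": [10.2, 10.4], "volume": [1000, 1200]},
#       index=pd.to_datetime(["2021-01-01", "2021-01-02"]),
#   )
#   atr(ohlcv, period=14)  # -> Series named '14 ATR'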
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
TR = tr(data)
return pd.Series(
TR.rolling(center=False, window=period,
min_periods=1).mean(),
name=f'{period} ATR'
)
# [0] __ Adaptive Price Zone (APZ)
# TODO
def apz(data: DataFrame,period: int = 21,dev_factor: int = 2,
MA: Series = None,adjust: bool = True,) -> DataFrame:
if not isinstance(MA, pd.Series):
MA = dema(data, period)
price_range = pd.Series(
(data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
# ------------------> B <------------------------
# [0] __ Bollinger Bands (BBANDS)
# TODO
def bbands(data: DataFrame,period: int = 20,MA: Series = None,
column: str = "close",std_multiplier: float = 2,) -> DataFrame:
std = data[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(sma(data, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
# [0] __ Bollinger Bands Width (BBWidth)
# TODO
def bbwidth(
data: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
BB = bbands(data, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
DEMA = (
2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean()
)
return pd.Series(
DEMA ,
name = f'{period}_DEMA'
)
# [0] __ Directional Movement Index (DMI)
# TODO
def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series:
def _get_time(close):
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = data.iloc[0:index]
else:
subset = data.iloc[(index - time) : index]
return rsi(subset, period=time, adjust=adjust).values[-1]
dates = Series(data.index)
periods = Series(range(14, len(dates)), index=dates.index[14:].values)
t = _get_time(data[column])
return periods.map(lambda x: _dmi(x))
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average (EMA)
# Exponential average of prev n day prices
def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(span=period, adjust=adjust).mean(),
name = f'{period}_EMA'
)
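# Example (illustrative): ema(ohlcv, period=20) returns a Series named '20_EMA'
# holding the 20-period exponential average of the 'close' column; pass
# column='open' (or any other price column) to smooth a different series.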
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# change in price / volatility, where both the change and the volatility are absolute values
def er(data : DataFrame,period: int = 10,column: str ='close') -> Series:
change = data[column].diff(period).abs()
volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum()
return pd.Series(change / volatility,
name=f'{period}_ER'
)
# [0] __ TODO (EVSTC)
# TODO
def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30,
k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series:
ema_slow = evwma(data, period_slow)
ema_fast = evwma(data, period_fast)
macd = ema_fast - ema_slow
STOK = pd.Series((
(macd - macd.rolling(window=k_period).min())
/ (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean()
return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period))
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
def evwma(data, period: int = 20) -> Series:
vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum())
x = (vol_sum - data["volume"]) / vol_sum
y = (data["volume"] * data["close"]) / vol_sum
evwma = [0]
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=data.index,
name=f'{period}_EVWMA'
)
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on the basis of the Elastic Volume Weighted Moving Average (EVWMA)
def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40,
signal: int = 9,adjust: bool = True,) -> DataFrame:
evwma_slow = evwma(data, period_slow)
evwma_fast = evwma(data, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="EV MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
# ------------------> F <------------------------
# [0] __ Fisher Transform
# TODO
def fish(data: DataFrame, period: int = 10, adjust: bool = True) -> Series:
from numpy import log, seterr
seterr(divide="ignore")
med = (data["high"] + data["low"]) / 2
ndaylow = med.rolling(window=period).min()
ndayhigh = med.rolling(window=period).max()
raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1
smooth = raw.ewm(span=5, adjust=adjust).mean()
_smooth = smooth.fillna(0)
return pd.Series(
(log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(),
name="{0} period FISH.".format(period),
)
# [0] __ Fractal Adaptive Moving Average (FRAMA)
# TODO
def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series:
    assert period % 2 == 0, "FRAMA period must be even"
c = data.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=data.index,
name= f'{period} FRAMA'
)
# [0] __ Finite Volume Element (FVE)
# TODO
def fve(data: DataFrame, period: int = 22, factor: int = 0.3) -> Series:
hl2 = (data["high"] + data["low"]) / 2
tp_ = tp(data)
smav = data["volume"].rolling(window=period).mean()
mf = pd.Series((data["close"] - hl2 + tp_.diff()), name="mf")
_mf = pd.concat([data["close"], data["volume"], mf], axis=1)
def vol_shift(row):
if row["mf"] > factor * row["close"] / 100:
return row["volume"]
elif row["mf"] < -factor * row["close"] / 100:
return -row["volume"]
else:
return 0
_mf["vol_shift"] = _mf.apply(vol_shift, axis=1)
_sum = _mf["vol_shift"].rolling(window=period).sum()
return pd.Series((_sum / smav) / period * 100)
# ------------------> H <------------------------
# [0] __ Hull Moving Average (HMA)
# WMA (over sqrt(period)) of the series 2 * WMA(period/2) - WMA(period)
def hma(data, period: int = 16) -> Series:
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = wma(data, period=half_length)
wmas = wma(data, period=period)
data["deltawma"] = 2 * wmaf - wmas
hma = wma(data, column="deltawma", period=sqrt_length)
return pd.Series(hma, name=f'{period}_HMA')
# ------------------> I <------------------------
# [0] __ Ichimoku Cloud
# TODO
def ichimoku(data: DataFrame,tenkan_period: int = 9,kijun_period: int = 26,
senkou_period: int = 52,chikou_period: int = 26,) -> DataFrame:
tenkan_sen = pd.Series(
(
data["high"].rolling(window=tenkan_period).max()
+ data["low"].rolling(window=tenkan_period).min()
)
/ 2,
name="TENKAN",
) ## conversion line
kijun_sen = pd.Series(
(
data["high"].rolling(window=kijun_period).max()
+ data["low"].rolling(window=kijun_period).min()
)
/ 2,
name="KIJUN",
) ## base line
senkou_span_a = pd.Series(
((tenkan_sen + kijun_sen) / 2), name="senkou_span_a"
) .shift(kijun_period) ## Leading span
senkou_span_b = pd.Series(
(
(
data["high"].rolling(window=senkou_period).max()
+ data["low"].rolling(window=senkou_period).min()
)
/ 2
),
name="SENKOU",
).shift(kijun_period)
chikou_span = pd.Series(
data["close"].shift(-chikou_period),
name="CHIKOU",
)
return pd.concat(
[tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1
)
# [0] __ Inverse Fisher Transform (IFTRSI)
# TODO
def ift_rsi(data: DataFrame,column: str = "close",rsi_period: int = 5,
wma_period: int = 9,) -> Series:
v1 = pd.Series(0.1 * (rsi(data, rsi_period) - 50), name="v1")
d = (wma_period * (wma_period + 1)) / 2
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
return pd.Series(
((v2 ** 2 - 1) / (v2 ** 2 + 1)),
name="IFT_RSI"
)
# ------------------> K <------------------------
# [0] __ Kaufman's Adaptive Moving Average (KAMA)
# first KAMA is SMA
# Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA)
def kama(data,er_: int = 10,ema_fast: int = 2,
ema_slow: int = 30,period: int = 20,
column: str ='close') -> Series:
er_ = er(data)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er_ * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
)
sma = pd.Series(
data[column].rolling(period).mean(), name="SMA"
)
kama = []
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), data[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name=f'{period}_KAMA')
return sma['KAMA']
# [0] __ Keltner Channels (KC)
# TODO
def kc(ohlc: DataFrame,period: int = 20,atr_period: int = 10,
MA: Series = None,kc_mult: float = 2,) -> DataFrame:
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(ema(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * atr(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * atr(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
# ------------------> M <------------------------
# [0] __ Moving average convergence divergence (MACD)
# MACD is Difference of ema fast and ema slow
# Here fast period is 12 and slow period is 26
# MACD Signal is ewm of MACD
def macd(data,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",adjust: bool = True
) -> DataFrame:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name=f'{period_fast}_EMA_fast')
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name=f'{period_slow}_EMA_slow')
MACD = pd.Series(EMA_fast - EMA_slow,name='MACD')
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(),name=f'{signal}_SIGNAL'
)
DIFF = pd.Series(
MACD - MACD_signal,
name="diff MACD_MSIGNAL"
)
return pd.concat(
[DIFF, MACD, MACD_signal ],
axis=1
)
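# Example (illustrative): macd(ohlcv) returns a DataFrame with the columns
# 'diff MACD_MSIGNAL', 'MACD' and '9_SIGNAL'. One common (assumed) way to use
# it is to flag a bullish crossover whenever the histogram column turns
# positive:
#
#   out = macd(ohlcv)
#   bullish = out['diff MACD_MSIGNAL'] > 0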
# [0] __ Moving Standard Deviation (MSD)
# Rolling standard deviation over the given period for the column passed as argument
def msd(data: DataFrame, period: int = 21, column: str = "close") -> Series:
return pd.Series(data[column].rolling(period).std(), name="MSD")
# Momentum Breakout Bands (MOBO)
# TODO
def mobo(data: DataFrame,period: int = 10,std_multiplier: float = 0.8,
column: str = "close",) -> DataFrame:
    BB = bbands(data, period=period, std_multiplier=std_multiplier, column=column)
return BB
# [0] __ Market momentum (MOM)
def mom(data: DataFrame, period: int = 10, column: str = "close") -> Series:
return pd.Series(data[column].diff(period),
name=f'{period}_MOM'
)
# [0] __ Moving Volume Weighted Average Price (MVWAP)
# SMA of (close * volume ) divided by SMA of volume
def mvwap(data: DataFrame, period:int = 9) -> Series:
data["cv"] =(data["close"] * data["volume"])
return pd.Series(
(sma(data,period = period,column = "cv")/sma(data,period=period,column="volume")),
name="MVWAP."
)
# ------------------> P <------------------------
# ------------|| Pivot ||------------------------
# [0] __ Pivot Camarilla
# TODO
def pivot_camarilla(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
    s1 = df_['close']-(1.1*(df_['high']-df_['low'])/12)
s2 = df_['close']-(1.1*(df_['high']-df_['low'])/6)
s3 = df_['close']-(1.1*(df_['high']-df_['low'])/4)
s4 =df_['close']-(1.1*(df_['high']-df_['low'])/2)
r1 = df_['close']+(1.1*(df_['high']-df_['low'])/12)
r2 = df_['close']+(1.1*(df_['high']-df_['low'])/6)
r3 =df_['close']+(1.1*(df_['high']-df_['low'])/4)
r4 = df_['close']+(1.1*(df_['high']-df_['low'])/2)
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
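# Worked example (illustrative numbers): with yesterday's high=110, low=100 and
# close=105 the range is 10, so
#   r1 = 105 + 1.1*10/12 ~= 105.92,   s1 = 105 - 1.1*10/12 ~= 104.08
#   r4 = 105 + 1.1*10/2  =  110.50,   s4 = 105 - 1.1*10/2  =   99.50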
# [0] __ Pivot Classic
# TODO
def pivot_classic(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = (pivot * 2) - df_["high"]
s2 = pivot - (df_["high"] - df_["low"])
s3 = pivot - 2*(df_["high"] - df_["low"])
s4 = pivot - 3*(df_["high"] - df_["low"])
r1 = (pivot * 2) - df_["low"]
r2 = pivot + (df_["high"] - df_["low"])
r3 = pivot + 2*(df_["high"] - df_["low"])
r4 = pivot + 3*(df_["high"] - df_["low"])
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Demark
# TODO
def pivot_demark(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot,s1,r1=[],[],[]
for i in range(len(df_)):
if df_['open'][i]==df_['close'][i]:
x=df_['high'][i]+df_['low'][i]+2*df_['close'][i]
elif df_['close'][i]>df_['open'][i]:
x=2*df_['high'][i]+df_['low'][i]+df_['close'][i]
else:
x=df_['high'][i]+2*df_['low'][i]+df_['close'][i]
pivot.append(x/4)
s1.append(x/2 - df_["high"][i])
r1.append(x/2 - df_["low"][i])
data_ = pd.DataFrame(pivot,columns=['pivot'])
data_['s1']=s1
data_['r1']=r1
return data_
# [0] __ Pivot Fibonacci
# TODO
def pivot_fibonacci(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = pivot - ((df_["high"] - df_["low"])*0.382)
s2 = pivot - ((df_["high"] - df_["low"])*0.618)
s3 = pivot - (df_["high"] - df_["low"])
s4 = pivot + ((df_["high"] - df_["low"])*1.382)
r1 = pivot + ((df_["high"] - df_["low"])*0.382)
r2 = pivot + ((df_["high"] - df_["low"])*0.618)
r3 =pivot + (df_["high"] - df_["low"])
r4 = pivot + (df_["high"] - df_["low"])*1.382
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Traditional
# TODO
def pivot_traditional(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = (pivot * 2) - df_["high"]
s2 = pivot - (df_["high"] - df_["low"])
s3 = df_["low"] - (2 * (df_["high"] - pivot))
s4 = df_["low"] - (3 * (df_["high"] - pivot))
s5 = df_["low"] - (4 * (df_["high"] - pivot))
r1 = (pivot * 2) - df_["low"]
r2 = pivot + (df_["high"] - df_["low"])
r3 = df_["high"] + (2 * (pivot - df_["low"]))
r4 = df_["high"] + (3 * (pivot - df_["low"]))
r5 = df_["high"] + (4 * (pivot - df_["low"]))
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(s5, name="s5"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
pd.Series(r5, name="r5"),
],
axis=1,
)
# [0] __ Pivot Woodie
# TODO
def pivot_woodie(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series((df_['high']+df_['low']+2*data['open'])/4, name="pivot")
s1 = 2*pivot-df_['high']
s2 = pivot - (df_["high"] - df_["low"])
s3 = df_["low"] - (2 * (pivot - df_["high"]))
s4 = s3 - (df_["high"] - df_["low"])
r1 = 2*pivot-df_['low']
r2 = pivot + (df_["high"] - df_["low"])
r3 =df_["high"] + (2 * (pivot - df_["low"]))
r4 = r3 + (df_["high"] - df_["low"])
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ PPO
# TODO
def ppo(data: DataFrame,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",
adjust: bool = True,) -> DataFrame:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name="EMA_fast",
)
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name="EMA_slow",
)
PPO = pd.Series(((EMA_fast - EMA_slow) / EMA_slow) * 100, name="PPO")
PPO_signal = pd.Series(
PPO.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
PPO_histo = pd.Series(PPO - PPO_signal, name="HISTO")
return pd.concat([PPO, PPO_signal, PPO_histo], axis=1)
# ------------------> R <------------------------
# [0] __ Relative Strength Index (RSI)
# EMA of up and down gives gain and loss
# Relative Strength Index is gain / loss
def rsi(data: DataFrame, period: int = 14,column: str = "close",
adjust: bool = True,) -> Series:
delta = data[column].diff()
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
_gain = up.ewm(alpha=1.0 / period, adjust=adjust).mean()
_loss = down.abs().ewm(alpha=1.0 / period, adjust=adjust).mean()
RS = _gain / _loss
return pd.Series(100 - (100 / (1 + RS)),
name=f'{period} period RSI'
)
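# Worked example (illustrative numbers): if the smoothed average gain is 1.0
# and the smoothed average loss is 0.5, then RS = 1.0 / 0.5 = 2 and
# RSI = 100 - 100 / (1 + 2) ~= 66.7, i.e. recent gains dominate recent losses.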
# [0] __ Rate of Change (ROC)
def roc(data: DataFrame, period: int = 12, column: str = "close") -> Series:
return pd.Series(
(data[column].diff(period) / data[column].shift(period)) * 100,
name="ROC"
)
# ------------------> S <------------------------
# [0] __ Stop And Reverse (SAR)
# The indicator is below prices when prices are rising and above
# prices when prices are falling.
# TODO
def sar(data: DataFrame, af: int = 0.02, amax: int = 0.2) -> Series:
high, low = data.high, data.low
# Starting values
sig0, xpt0, af0 = True, high[0], af
_sar = [low[0] - (high - low).std()]
for i in range(1, len(data)):
sig1, xpt1, af1 = sig0, xpt0, af0
lmin = min(low[i - 1], low[i])
lmax = max(high[i - 1], high[i])
if sig1:
sig0 = low[i] > _sar[-1]
xpt0 = max(lmax, xpt1)
else:
sig0 = high[i] >= _sar[-1]
xpt0 = min(lmin, xpt1)
if sig0 == sig1:
sari = _sar[-1] + (xpt1 - _sar[-1]) * af1
af0 = min(amax, af1 + af)
if sig0:
af0 = af0 if xpt0 > xpt1 else af1
sari = min(sari, lmin)
else:
af0 = af0 if xpt0 < xpt1 else af1
sari = max(sari, lmax)
else:
af0 = af
sari = xpt0
_sar.append(sari)
return pd.Series(_sar, index=data.index)
# [0] __ Simple moving average (SMA) or moving average (MA)
# Average of prev n day prices
def sma(data,period: int = 10,column: str ='close') -> Series:
return pd.Series(
data[column].rolling(window = period,min_periods= 1).mean(),
name = f'{period}_SMA'
)
# [0] __ Simple moving median (SMM) or moving median (MM)
# median of prev n day prices
def smm(data,period: int = 10,column: str ='close') -> Series:
return pd.Series(
data[column].rolling(window = period,min_periods= 1).median(),
name = f'{period}_SMM'
)
# [0] __ Simple smoothed moving average (SSMA), also known as the smoothed moving average (SMMA)
# smoothed (exponential + simple) average of prev n day prices
def ssma(data,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(ignore_na = False, alpha=1.0/period,
min_periods=0, adjust=adjust).mean(),
name = f'{period}_SSMA'
)
# [0] __ The Schaff Trend Cycle (Oscillator) (STC)
# TODO
def stc(data: DataFrame,period_fast: int = 23,period_slow: int = 50,k_period: int = 10,
d_period: int = 3,column: str = "close",adjust: bool = True) -> Series:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name="EMA_fast",
)
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name="EMA_slow",
)
MACD = pd.Series((EMA_fast - EMA_slow), name="MACD")
STOK = pd.Series((
(MACD - MACD.rolling(window=k_period).min())
/ (MACD.rolling(window=k_period).max() - MACD.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean() # "double smoothed"
return pd.Series(STOD_DoubleSmooth, name="{0} period STC".format(k_period))
# [0] __ Squeeze Momentum Indicator (SQZMI)
# TODO
def sqzmi(data: DataFrame, period: int = 20, MA: Series = None) -> DataFrame:
if not isinstance(MA, pd.core.series.Series):
ma = pd.Series(sma(data, period))
else:
        ma = MA
bb = bbands(data, period=period, MA=ma)
kc_ = kc(data, period=period, kc_mult=1.5)
comb = pd.concat([bb, kc_], axis=1)
def sqz_on(row):
if row["BB_LOWER"] > row["KC_LOWER"] and row["BB_UPPER"] < row["KC_UPPER"]:
return True
else:
return False
comb["SQZ"] = comb.apply(sqz_on, axis=1)
return pd.Series(comb["SQZ"], name="{0} period SQZMI".format(period))
# ------------------> T <------------------------
# [0] __ Triple Exponential Moving Average (TEMA)
# 3 * EMA - 3 * EMA(EMA) + EMA(EMA(EMA)), i.e. a triple-smoothed, reduced-lag EMA
def tema(data,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
triple_ema = 3 * ema(data,period)
ema_ema_ema = (
ema(data,period).ewm(ignore_na = False, span = period, adjust = adjust).mean()
.ewm(ignore_na = False, span = period, adjust = adjust).mean()
)
TEMA = (
triple_ema - 3 * ema(data,period).ewm(span=period, adjust= adjust).mean() + ema_ema_ema
)
return pd.Series(
TEMA ,
name = f'{period}_TEMA'
)
# [0] __ Typical Price (TP)
# average of high low close price
def tp(data: DataFrame) -> Series:
return pd.Series(
(data["high"] + data["low"] + data["close"]) / 3,
name="TP"
)
# [0] __ True Range (TR)
# maximum of three price ranges, i.e. TR1, TR2, TR3
def tr(data: DataFrame) -> Series:
TR1 = pd.Series(data["high"] - data["low"]).abs()
TR2 = pd.Series(data["high"] - data["close"].shift()).abs()
TR3 = pd.Series(data["close"].shift() - data["low"]).abs()
_TR = pd.concat([TR1, TR2, TR3], axis=1)
_TR["TR"] = _TR.max(axis=1)
return pd.Series(_TR["TR"],
name="TR"
)
# [0] __ Triangular Moving Average (TRIMA) or (TMA)
# rolling sum of the SMA over the period, divided by the period
def trima(data,period: int = 10,adjust: bool = True) -> Series:
SMA = sma(data,period).rolling(window=period , min_periods=1).sum()
return pd.Series(
SMA / period,
name = f'{period}_TRIMA'
)
# [0] __ Triple Exponential Average (TRIX)
# 10000 * (m - mprev) / m, where m = ema(ema(ema(data))), i.e. a triple-smoothed EMA
def trix(data,period: int = 10,adjust: bool = True,column: str ='close') -> Series:
data_ = data[column]
def _ema(data_, period, adjust):
return pd.Series(data_.ewm(span=period, adjust=adjust).mean())
m = _ema(_ema(_ema(data_, period, adjust), period, adjust), period, adjust)
return pd.Series(
10000 * (m.diff() / m),
name = f'{period}_TRIX'
)
# ------------------> V <------------------------
# [0] __ Volume Adjusted Moving Average (VAMA)
# volume ratio = (price * volume) / rolling mean of volume over n periods
# cumulative sum = sum of (volume ratio * price) over n periods
# cumulative division = sum of (volume ratio) over n periods
# VAMA = cumulative sum / cumulative division
def vama(data,period: int = 10,column: str ='close') -> Series:
vp = data[column]*data['volume']
volsum = data["volume"].rolling(window=period,min_periods=1).mean()
volRatio = pd.Series(vp / volsum, name="VAMA")
cumSum = (volRatio * data[column]).rolling(window=period,min_periods=1).sum()
cumDiv = volRatio.rolling(window=period,min_periods=1).sum()
return pd.Series(
cumSum / cumDiv,
name=f'{period}_VAMA'
)
# [0] __ Volume Price Trend (VPT)
# TODO
def vpt(data: DataFrame) -> Series:
hilow = (data["high"] - data["low"]) * 100
openclose = (data["close"] - data["open"]) * 100
vol = data["volume"] / hilow
spreadvol = (openclose * vol).cumsum()
vpt = spreadvol + spreadvol
return pd.Series(vpt, name="VPT")
# [0] __ Volume Weighted Average Price (VWAP)
# cumulative sum of (typical price * volume) divided by the cumulative sum of volume
def vwap(data: DataFrame) -> Series:
return pd.Series(
((data["volume"] * tp(data)).cumsum()) / data["volume"].cumsum(),
name="VWAP",
)
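# Example (illustrative): ohlcv['vwap'] = vwap(ohlcv). Because both numerator
# and denominator are cumulative sums, the value is anchored to the first row
# of the DataFrame rather than reset per session.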
# [0] __ Volume Weighted Moving Average Convergence Divergence (VW MACD)
# difference between the fast and slow VWMA
def vw_macd(data: DataFrame,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",
adjust: bool = True,
) -> DataFrame:
MACD = pd.Series(vwma(data,period=period_fast)-vwma(data,period=period_slow),
name="VW MACD")
MACD_signal = pd.Series(
MACD.ewm(span=signal, adjust=adjust).mean(),
name="MACD Signal"
)
return pd.concat([MACD, MACD_signal], axis=1)
# [0] __ Volume Weighted Moving Average (VWMA)
# sum of (data * volume) for n period divided by
# sum of volume for n period
def vwma(data: DataFrame,period: int = 20,column: str = "close",
adjust: bool = True,
) -> DataFrame:
cv=(data[column]*data['volume']).rolling(window=period,min_periods=1).sum()
v=data['volume'].rolling(window=period,min_periods=1).sum()
return pd.Series(cv/v,name='VWMA')
# ------------------> V <------------------------
# [0] __ Volume Flow Indicator (VFI)
# TODO
def vfi(data: DataFrame,period: int = 130,smoothing_factor: int = 3,factor: int = 0.2,
vfactor: int = 2.5,adjust: bool = True,) -> Series:
typical = tp(data)
inter = typical.apply(np.log).diff()
vinter = inter.rolling(window=30).std()
cutoff = pd.Series(factor * vinter * data["close"], name="cutoff")
price_change = pd.Series(typical.diff(), name="pc")
mav = pd.Series(
data["volume"].rolling(center=False, window=period).mean(), name="mav",
)
_va = pd.concat([data["volume"], mav.shift()], axis=1)
_mp = pd.concat([price_change, cutoff], axis=1)
_mp.fillna(value=0, inplace=True)
def _vol_added(row):
if row["volume"] > vfactor * row["mav"]:
return vfactor * row["mav"]
else:
return row["volume"]
added_vol = _va.apply(_vol_added, axis=1)
def _multiplier(row):
if row["pc"] > row["cutoff"]:
return 1
elif row["pc"] < 0 - row["cutoff"]:
return -1
else:
return 0
multiplier = _mp.apply(_multiplier, axis=1)
raw_sum = (multiplier * added_vol).rolling(window=period).sum()
raw_value = raw_sum / mav.shift()
vfi = pd.Series(
raw_value.ewm(
ignore_na=False,
min_periods=smoothing_factor - 1,
span=smoothing_factor,
adjust=adjust,
).mean(),
name="VFI",
)
return vfi
# [0] __ Value chart (VC)
# TODO
def vc(data: DataFrame, period: int = 5) -> DataFrame:
float_axis = ((data.high + data.low) / 2).rolling(window=period).mean()
vol_unit = (data.high - data.low).rolling(window=period).mean() * 0.2
value_chart_high = pd.Series((data.high - float_axis) / vol_unit, name="Value Chart High")
value_chart_low = | pd.Series((data.low - float_axis) / vol_unit, name="Value Chart Low") | pandas.Series |
import spacy
import pandas as pd
from nltk.tokenize import word_tokenize
| pd.set_option('display.max_columns', None) | pandas.set_option |
#!/usr/bin/env python3
import os
import stat
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
def getNumLines(filePath):
numLines = 0
with open(filePath, "r") as f:
for line in f:
numLines += 1
return numLines
if __name__ == "__main__":
pathArtifacts = sys.argv[1]
systems = [
"DAPHNE",
"SYSDS",
"SYSDSP",
"TF",
"TFXLA"
]
dfs = []
for system in systems:
csvPath = os.path.join(pathArtifacts,"{}_runtimes.csv".format(system))
csvPath2 = os.path.join(pathArtifacts,"{}-dataload.csv".format(system))
csvPath3 = os.path.join(pathArtifacts,"{}-scoring.csv".format(system))
df = pd.read_csv(csvPath, sep="\t")
df2 = | pd.read_csv(csvPath2, sep="\t") | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 27 21:48:06 2021
@author: <NAME>
"""
import pandas as pd
import re
import os
import requests
from config import MY_API_KEYS
from datetime import datetime
import string
import time
'''
1. Full-text collection from ScienceDirect
If you have any questions regarding the full-text collection script below, please contact <EMAIL>
Note: in order to use Elsevier APIs (ScienceDirect, Scopus, ...), you should have registered an API account at the Elsevier Developer Portal and your
institution should have a subscription to the relevant full-text resources (e.g., journals).
Option 1: If you downloaded the citation information from the ScienceDirect website, follow the steps below.
'''
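# A minimal sketch of the expected config module (assumed layout -- replace the
# placeholders with your own Elsevier API keys):
#
#   # config.py
#   MY_API_KEYS = [
#       "YOUR-ELSEVIER-API-KEY-1",
#       "YOUR-ELSEVIER-API-KEY-2",  # extra keys allow rotating when one hits its quota
#   ]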
meta_folder = 'name a folder to save meta data here'
# set directory
print('Getting directory...')
cwd = os.getcwd()
dir_meta = os.path.join(cwd, meta_folder)
dir_corpus = os.path.join(cwd, 'corpus')
# load the api key from config file
api_idx = 0
my_api_key = MY_API_KEYS[api_idx]
# if you downloaded the meta files manually from the ScienceDirect website, proceed as follows
def meta_data_processing(meta_directory, if_save=True):
# meta file processing
print("Processing meta data...")
target_dois = []
corresponding_titles = []
# we check each folder under the meta-file directory
for folder_file in os.listdir(meta_directory):
if '.txt' in folder_file:
with open(os.path.join(meta_directory, folder_file), 'r') as meta_ref:
# read the text content of each meta file
meta_data = meta_ref.read()
# split the text into individual records
meta_records = meta_data.split('\n\n')
for meta_record in meta_records:
# split each individual record to detailed info
meta_record = meta_record.split('\n')
# we record the title and doi number for download
for sub_record in meta_record:
if 'https://doi.org' in sub_record:
# add the doi number to the download list
target_dois += [sub_record]
# since title is the second line of each record
corresponding_titles += [meta_record[1]]
df_integrated_meta = | pd.DataFrame(columns=['doi', 'title']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from covsirphy.cleaning.cbase import CleaningBase
class CountryData(CleaningBase):
"""
Data cleaning of country level data.
"""
def __init__(self, filename, country):
"""
        @filename <str>: path of the source CSV file
        @country <str>: country name
"""
self._raw = | pd.read_csv(filename) | pandas.read_csv |
import numpy as np
import pandas as pd
import sqlite3
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from pickle import dump
####################
# uncomment the lines depending on your source (SQLite or csv)
# SQLLite
# connection = sqlite3.connect("loan_database") # connect to sql db
# df = pd.read_sql_query('SELECT * FROM joined_data;', connection)
# connection.execute("VACUUM;")
# from CSV (during development)
df = pd.read_csv('joined_data.csv', low_memory=False)
print('import done')
#####################
# replace special values with null based on Prosper documentation
# we aren't going to worry about mixed type features as a simplifying assumption
df.replace(to_replace=[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, 999],
value = np.nan,
inplace = True)
print("replaced special values")
# convert all column names to lowercase
df.columns = df.columns.str.strip().str.lower()
# drop some un-needed columns
# df.drop(['unnamed: 0', 'level_0', 'unnamed: 0.1'], inplace=True, axis=1)
df.drop(['unnamed: 0'], inplace=True, axis=1)
#drop Experian fields
exp_fields_to_drop = pd.read_excel('tu_exp_fields.xlsx', sheet_name='EXP')
exp_fields_to_drop = exp_fields_to_drop['Field']
df.drop(exp_fields_to_drop, inplace=True, axis=1)
# create year column & leave as string (will one-hot encode later)
df['year'] = df['loan_origination_date'].str[:4]
# store as a vector for filter later
year = df['loan_origination_date'].str[:4].astype(int)
# drop columns with 'date' in name since we have captured origination year
df.drop(df.filter(regex='date').columns, inplace=True, axis=1)
df.drop(df.filter(regex='paid').columns, inplace=True, axis=1)
print('Removed dates and paid columns')
# create training dataframe
# we still need to keep to the side to identify records later
loan_numbers = df['loan_number']
# create default flag vector
default_flag = np.where(df['loan_status'] == 2, 1, 0)
# remove columns we know are not known at origination or that we do not want in model
df.drop(['age_in_months', 'days_past_due', 'loan_number', 'principal_balance',
'debt_sale_proceeds_received', 'next_payment_due_amount', 'loan_default_reason',
'loan_default_reason_description', 'index', 'member_key', 'listing_number', 'amount_funded',
'amount_remaining', 'percent_funded', 'partial_funding_indicator', 'funding_threshold',
'estimated_return', 'estimated_loss_rate', 'lender_yield', 'effective_yield', 'listing_category_id',
'income_range', 'lender_indicator', 'group_indicator', 'group_name', 'channel_code',
'amount_participation', 'investment_typeid', 'investment_type_description', 'loan_status',
'loan_status_description', 'listing_status_reason', 'borrower_city', 'borrower_metropolitan_area',
'first_recorded_credit_line', 'investment_type_description', 'tuficorange', 'listing_term', 'listing_amount',
'borrower_apr']
, inplace=True
, axis=1)
# identify non numeric columns to one-hot encode
str_cols = list(df.select_dtypes(include=['object', 'string']).columns)
#print(str_cols)
# add loan term to features to one-hot encode. We want to treat as categorical since only three possible terms.
str_cols.append('term')
# write function to one-hot encode specific features
def encode_and_bind(original_dataframe, feature_to_encode):
dummies = pd.get_dummies(original_dataframe[[feature_to_encode]], dummy_na=True)
result = pd.concat([original_dataframe, dummies], axis=1)
result = result.drop([feature_to_encode], axis=1)
return result
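# Example (illustrative): encode_and_bind(df, 'term') drops the 'term' column
# and appends dummy columns such as 'term_36.0', 'term_60.0' and 'term_nan'
# (dummy_na=True adds an explicit indicator column for missing values).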
# perform one hot encoding on string features
for feature in str_cols:
df = encode_and_bind(df, feature)
print('Finished One-Hot encoding')
# filter to 2017 and beyond since that is when TransUnion started being used
full_df = df
full_default_flag = default_flag
# default_flag = default_flag[df['year'].astype(int) >= 2017]
default_flag = default_flag[year >= 2017]
# df = df[df['year'].astype(int) >= 2017]
df = df[year >= 2017]
print('Finished filtering by year')
#capture feature names to ID later
feature_names = pd.Series(df.columns.values)
feature_names.to_csv('feature_names_considered.csv', index=False)
# dump(feature_names, open('feature_names.pkl', 'wb'))
# filter by prosper rating
df_AA = df[df['prosper_rating_AA'] == 1]
df_A = df[df['prosper_rating_A'] == 1]
df_B = df[df['prosper_rating_B'] == 1]
df_C = df[df['prosper_rating_C'] == 1]
df_D = df[df['prosper_rating_D'] == 1]
df_E = df[df['prosper_rating_E'] == 1]
df_HR = df[df['prosper_rating_HR'] == 1]
# convert to array to pass to the model
df_AA = df_AA.values
df_A = df_A.values
df_B = df_B.values
df_C = df_C.values
df_D = df_D.values
df_E = df_E.values
df_HR = df_HR.values
# Fill n/a and inf values with 0 now that missing flag is set
df_AA[~np.isfinite(df_AA)] = 0
df_A[~np.isfinite(df_A)] = 0
df_B[~np.isfinite(df_B)] = 0
df_C[~np.isfinite(df_C)] = 0
df_D[~np.isfinite(df_D)] = 0
df_E[~np.isfinite(df_E)] = 0
df_HR[~np.isfinite(df_HR)] = 0
print('Defined model datasets done')
# start modeling
# define model hyperparameters and cv
def logistic_cv(x_train, y_true, class_wgts, folds=5, regs = [.05], max_iterations=500):
return LogisticRegressionCV(Cs=regs, cv=folds, penalty='l1', class_weight=class_wgts, scoring='f1',
max_iter=max_iterations, solver='saga', random_state=1990).fit(x_train, y_true)
# find optimal class weights and regularization strength
weights = np.linspace(0.04, 0.07, 4)
regs = [.01, .05, .1]
gsc = GridSearchCV(
estimator=LogisticRegression(),
param_grid={
'class_weight': [{0: x, 1: 1.0-x} for x in weights], 'C': regs, 'penalty': ['l1'], 'random_state': [1990],
'solver': ['saga'], 'max_iter': [750]
},
scoring='f1',
cv=3
)
# prosper rating AA
scaler_AA = StandardScaler().fit(df_AA)
train = scaler_AA.transform(df_AA)
y = default_flag[df['prosper_rating_AA'] == 1]
model_AA = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=750)
features_AA = np.where(model_AA.coef_ != 0)
print('The AA model variables & coefficients are: ',
list(zip(np.array(feature_names)[features_AA[1].astype(int)],
model_AA.coef_[np.where(model_AA.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_AA, open('model_AA.pkl', 'wb'))
# dump(scaler_AA, open('scaler_AA.pkl', 'wb'))
# prosper rating A
scaler_A = StandardScaler().fit(df_A)
train = scaler_A.transform(df_A)
y = default_flag[df['prosper_rating_A'] == 1]
# model_A = gsc.fit(train, y)
model_A = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=750)
features_A = np.where(model_A.coef_ != 0)
print('The A model variables & coefficients are: ',
list(zip(np.array(feature_names)[features_A[1].astype(int)],
model_A.coef_[np.where(model_A.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_A, open('model_A.pkl', 'wb'))
# dump(scaler_A, open('scaler_A.pkl', 'wb'))
# prosper rating B
scaler_B = StandardScaler().fit(df_B)
train = scaler_B.transform(df_B)
y = default_flag[df['prosper_rating_B'] == 1]
model_B = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=500)
# model_B = gsc.fit(train, y)
features_B = np.where(model_B.coef_ != 0)
print('The B model variables & coefficients are: ',
list(zip(np.array(feature_names)[features_B[1].astype(int)],
model_B.coef_[np.where(model_B.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_B, open('model_B.pkl', 'wb'))
# dump(scaler_B, open('scaler_B.pkl', 'wb'))
# prosper rating C
scaler_C = StandardScaler().fit(df_C)
train = scaler_C.transform(df_C)
y = default_flag[df['prosper_rating_C'] == 1]
model_C = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=500)
# model_C = gsc.fit(train, y)
features_C = np.where(model_C.coef_ != 0)
print('The C model variables & coefficients are: ',
list(zip(np.array(feature_names)[features_C[1].astype(int)],
model_C.coef_[np.where(model_C.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_C, open('model_C.pkl', 'wb'))
# dump(scaler_C, open('scaler_C.pkl', 'wb'))
# prosper rating D
scaler_D = StandardScaler().fit(df_D)
train = scaler_D.transform(df_D)
y = default_flag[df['prosper_rating_D'] == 1]
model_D = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=750)
# model_D = gsc.fit(train, y)
features_D = np.where(model_D.coef_ != 0)
print('The D model variables & coefficients are: ',
list(zip(np.array(feature_names)[features_D[1].astype(int)],
model_D.coef_[np.where(model_D.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_D, open('model_D.pkl', 'wb'))
# dump(scaler_D, open('scaler_D.pkl', 'wb'))
# prosper rating E
scaler_E = StandardScaler().fit(df_E)
train = scaler_E.transform(df_E)
y = default_flag[df['prosper_rating_E'] == 1]
model_E = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.05])
#model_E = gsc.fit(train, y)
features_E = np.where(model_E.coef_ != 0)
print('The E model variables & coefficients are: ',
list(zip(np.array(feature_names)[features_E[1].astype(int)],
model_E.coef_[np.where(model_E.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_E, open('model_E.pkl', 'wb'))
# dump(scaler_E, open('scaler_E.pkl', 'wb'))
# prosper rating HR
scaler_HR = StandardScaler().fit(df_HR)
train = scaler_HR.transform(df_HR)
y = default_flag[df['prosper_rating_HR'] == 1]
model_HR = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.1], max_iterations = 1000)
# model_HR = gsc.fit(train, y)
features_HR = np.where(model_HR.coef_ != 0)
print('The HR model variables & coefficients are: ',
list(zip(np.array(feature_names)[features_HR[1].astype(int)],
model_HR.coef_[np.where(model_HR.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_HR, open('model_HR.pkl', 'wb'))
# dump(scaler_HR, open('scaler_HR.pkl', 'wb'))
### PROBABILITIES ARE BIASED, BUT CAN BE USED FOR THRESHOLDS
full_df[~np.isfinite(full_df)] = 0
train = full_df
pred = dict.fromkeys(['AA', 'A', 'B', 'C', 'D', 'E', 'HR', 'nan'])
pred['AA'] = model_AA.predict_proba(scaler_AA.transform(train[train['prosper_rating_AA'] == 1].values))[:, 1]
pred['A'] = model_A.predict_proba(scaler_A.transform(train[train['prosper_rating_A'] == 1].values))[:, 1]
pred['B'] = model_B.predict_proba(scaler_B.transform(train[train['prosper_rating_B'] == 1].values))[:, 1]
pred['C'] = model_C.predict_proba(scaler_C.transform(train[train['prosper_rating_C'] == 1].values))[:, 1]
pred['D'] = model_D.predict_proba(scaler_D.transform(train[train['prosper_rating_D'] == 1].values))[:, 1]
pred['E'] = model_E.predict_proba(scaler_E.transform(train[train['prosper_rating_E'] == 1].values))[:, 1]
pred['HR'] = model_HR.predict_proba(scaler_HR.transform(train[train['prosper_rating_HR'] == 1].values))[:, 1]
pred['nan'] = model_C.predict_proba(scaler_C.transform(train[train['prosper_rating_nan'] == 1].values))[:, 1]
pred['AA'] = pd.qcut(pred['AA'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['A'] = pd.qcut(pred['A'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['B'] = | pd.qcut(pred['B'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop') | pandas.qcut |
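# Illustrative sketch: scoring a fresh batch of C-rated loans with the fitted
# scaler/model pair and bucketing the (biased but rank-preserving) probabilities
# into the same Plus/Mid/Minus tiers as above. `new_loans`, `p_default` and `tiers`
# are assumed names; the pattern mirrors the predict_proba/qcut calls used earlier.
new_loans = full_df[full_df['prosper_rating_C'] == 1]
p_default = model_C.predict_proba(scaler_C.transform(new_loans.values))[:, 1]
tiers = pd.qcut(p_default, q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')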
from IPython.display import display
import pandas as pd
import pyomo.environ as pe
import numpy as np
import csv
import os
import shutil
class inosys:
def __init__(self, inp_folder, ref_bus, dshed_cost = 1000000, rshed_cost = 500, phase = 3, vmin=0.85, vmax=1.15, sbase = 1, sc_fa = 1):
'''
Initialise the investment and operation problem.
:param str inp_folder: The input directory for the data. It expects to find several CSV files detailing the system input data (Default current folder)
:param float dshed_cost: Demand Shedding Price (Default 1000000)
:param float rshed_cost: Renewable Shedding Price (Default 500)
:param int phase: Number of Phases (Default 3)
:param float vmin: Minimum node voltage (Default 0.85)
:param float vmax: Maximum node voltage (Default 1.15)
:param float sbase: Base Apparent Power (Default 1 kW)
:param int ref_bus: Reference node
:param float sc_fa: Scaling Factor (Default 1)
:Example:
>>> import pyeplan
>>> sys_inv = pyeplan.inosys("wat_inv", ref_bus = 260)
'''
self.cgen = pd.read_csv(inp_folder + os.sep + 'cgen_dist.csv')
self.egen = pd.read_csv(inp_folder + os.sep + 'egen_dist.csv')
self.csol = pd.read_csv(inp_folder + os.sep + 'csol_dist.csv')
self.esol = pd.read_csv(inp_folder + os.sep + 'esol_dist.csv')
self.cwin = pd.read_csv(inp_folder + os.sep + 'cwin_dist.csv')
self.ewin = pd.read_csv(inp_folder + os.sep + 'ewin_dist.csv')
self.cbat = pd.read_csv(inp_folder + os.sep + 'cbat_dist.csv')
self.elin = pd.read_csv(inp_folder + os.sep + 'elin_dist.csv')
self.pdem = pd.read_csv(inp_folder + os.sep + 'pdem_dist.csv')
self.qdem = pd.read_csv(inp_folder + os.sep + 'qdem_dist.csv')
self.prep = pd.read_csv(inp_folder + os.sep + 'prep_dist.csv')
self.qrep = pd.read_csv(inp_folder + os.sep + 'qrep_dist.csv')
self.psol = pd.read_csv(inp_folder + os.sep + 'psol_dist.csv')
self.qsol = pd.read_csv(inp_folder + os.sep + 'qsol_dist.csv')
self.pwin = pd.read_csv(inp_folder + os.sep + 'pwin_dist.csv')
self.qwin = pd.read_csv(inp_folder + os.sep + 'qwin_dist.csv')
self.dtim = pd.read_csv(inp_folder + os.sep + 'dtim_dist.csv')
self.cgen['pmin'] = self.cgen['pmin'].div(sbase)
self.cgen['pmax'] = self.cgen['pmax'].div(sbase)
self.cgen['qmin'] = self.cgen['qmin'].div(sbase)
self.cgen['qmax'] = self.cgen['qmax'].div(sbase)
self.egen['pmin'] = self.egen['pmin'].div(sbase)
self.egen['pmax'] = self.egen['pmax'].div(sbase)
self.egen['qmin'] = self.egen['qmin'].div(sbase)
self.egen['qmax'] = self.egen['qmax'].div(sbase)
self.csol['pmin'] = self.csol['pmin'].div(sbase)
self.csol['pmax'] = self.csol['pmax'].div(sbase)
self.csol['qmin'] = self.csol['qmin'].div(sbase)
self.csol['qmax'] = self.csol['qmax'].div(sbase)
self.esol['pmin'] = self.esol['pmin'].div(sbase)
self.esol['pmax'] = self.esol['pmax'].div(sbase)
self.esol['qmin'] = self.esol['qmin'].div(sbase)
self.esol['qmax'] = self.esol['qmax'].div(sbase)
self.cwin['pmin'] = self.cwin['pmin'].div(sbase)
self.cwin['pmax'] = self.cwin['pmax'].div(sbase)
self.cwin['qmin'] = self.cwin['qmin'].div(sbase)
self.cwin['qmax'] = self.cwin['qmax'].div(sbase)
self.ewin['pmin'] = self.ewin['pmin'].div(sbase)
self.ewin['pmax'] = self.ewin['pmax'].div(sbase)
self.ewin['qmin'] = self.ewin['qmin'].div(sbase)
self.ewin['qmax'] = self.ewin['qmax'].div(sbase)
self.cbat['emin'] = self.cbat['emin'].div(sbase)
self.cbat['emax'] = self.cbat['emax'].div(sbase)
self.cbat['eini'] = self.cbat['eini'].div(sbase)
self.cbat['pmin'] = self.cbat['pmin'].div(sbase)
self.cbat['pmax'] = self.cbat['pmax'].div(sbase)
self.ncg = len(self.cgen)
self.neg = len(self.egen)
self.ncs = len(self.csol)
self.nes = len(self.esol)
self.ncw = len(self.cwin)
self.new = len(self.ewin)
self.ncb = len(self.cbat)
self.nel = len(self.elin)
self.nbb = self.pdem.shape[1]
self.ntt = self.prep.shape[0]
self.noo = self.prep.shape[1]
self.cds = dshed_cost
self.css = rshed_cost
self.cws = rshed_cost
self.sb = sbase
self.sf = sc_fa
self.ref_bus = ref_bus
self.vmin = vmin
self.vmax = vmax
self.inp_folder = inp_folder
self.phase = phase
self.outdir = ''
def solve(self, solver = 'glpk', neos = False, invest = False, onlyopr = True, commit = False, solemail = ''):
'''
Solve the investment and operation problem.
:param str solver: Solver to be used. Available: glpk, cbc, ipopt, gurobi
:param bool neos: True/False indicates whether the problem is submitted to the remote NEOS server (uses ``solemail``) or solved locally
:param bool invest: True/False indicates binary/continuous nature of investment-related decision variables
:param bool onlyopr: True/False indicates if the problem will only solve the operation or both investment and operation
:param bool commit: True/False indicates binary/continuous nature of commitment and demand-shedding decision variables
:param str solemail: E-mail address required for NEOS submissions (only used when ``neos`` is True)
:Example:
>>> import pyeplan
>>> sys_inv = pyeplan.inosys("wat_inv", ref_bus = 260)
>>> sys_inv.solve()
'''
#Define the Model type
m = pe.ConcreteModel()
#Define the Sets
m.cg = pe.Set(initialize=list(range(self.ncg)),ordered=True)
m.eg = pe.Set(initialize=list(range(self.neg)),ordered=True)
m.cs = pe.Set(initialize=list(range(self.ncs)),ordered=True)
m.es = pe.Set(initialize=list(range(self.nes)),ordered=True)
m.cw = pe.Set(initialize=list(range(self.ncw)),ordered=True)
m.ew = pe.Set(initialize=list(range(self.new)),ordered=True)
m.cb = pe.Set(initialize=list(range(self.ncb)),ordered=True)
m.el = pe.Set(initialize=list(range(self.nel)),ordered=True)
m.bb = pe.Set(initialize=list(range(self.nbb)),ordered=True)
m.tt = pe.Set(initialize=list(range(self.ntt)),ordered=True)
m.oo = pe.Set(initialize=list(range(self.noo)),ordered=True)
#Define Variables
#Objective Function
m.z = pe.Var()
m.opr = pe.Var()
m.inv = pe.Var()
m.she = pe.Var()
#Active and Reactive Power Generations (Conventional)
m.pcg = pe.Var(m.cg,m.tt,m.oo,within=pe.NonNegativeReals)
m.peg = pe.Var(m.eg,m.tt,m.oo,within=pe.NonNegativeReals)
m.qcg = pe.Var(m.cg,m.tt,m.oo,within=pe.NonNegativeReals)
m.qeg = pe.Var(m.eg,m.tt,m.oo,within=pe.NonNegativeReals)
#Active and Reactive Power Generations (Solar)
m.pcs = pe.Var(m.cs,m.tt,m.oo,within=pe.NonNegativeReals)
m.pes = pe.Var(m.es,m.tt,m.oo,within=pe.NonNegativeReals)
m.qcs = pe.Var(m.cs,m.tt,m.oo,within=pe.Reals)
m.qes = pe.Var(m.es,m.tt,m.oo,within=pe.Reals)
#Active and Reactive Power Generations (Wind)
m.pcw = pe.Var(m.cw,m.tt,m.oo,within=pe.NonNegativeReals)
m.pew = pe.Var(m.ew,m.tt,m.oo,within=pe.NonNegativeReals)
m.qcw = pe.Var(m.cw,m.tt,m.oo,within=pe.Reals)
m.qew = pe.Var(m.ew,m.tt,m.oo,within=pe.Reals)
#Charging and Discharging Status of Battery
m.pbc = pe.Var(m.cb,m.tt,m.oo,within=pe.NonNegativeReals)
m.pbd = pe.Var(m.cb,m.tt,m.oo,within=pe.NonNegativeReals)
m.qcd = pe.Var(m.cb,m.tt,m.oo,within=pe.Reals)
#Demand, Solar, and Wind Shedding
if commit:
m.pds = pe.Var(m.bb,m.tt,m.oo,within=pe.Binary)
else:
m.pds = pe.Var(m.bb,m.tt,m.oo,within=pe.NonNegativeReals,bounds=(0,1))
m.pss = pe.Var(m.bb,m.tt,m.oo,within=pe.NonNegativeReals)
m.pws = pe.Var(m.bb,m.tt,m.oo,within=pe.NonNegativeReals)
#Active and Reactive Line Flows
m.pel = pe.Var(m.el,m.tt,m.oo,within=pe.Reals) #Active Power
m.qel = pe.Var(m.el,m.tt,m.oo,within=pe.Reals) #Reactive Power
#Voltage Magnitude
m.vol = pe.Var(m.bb,m.tt,m.oo,within=pe.Reals,bounds=(self.vmin,self.vmax))
#Commitment Status
if commit:
m.cu = pe.Var(m.cg,m.tt,m.oo,within=pe.Binary)
m.eu = pe.Var(m.eg,m.tt,m.oo,within=pe.Binary)
else:
m.cu = pe.Var(m.cg,m.tt,m.oo,within=pe.NonNegativeReals)
m.eu = pe.Var(m.eg,m.tt,m.oo,within=pe.NonNegativeReals)
if not onlyopr:
#Investment Status (Conventional)
if invest:
m.xg = pe.Var(m.cg,within=pe.Binary)
else:
m.xg = pe.Var(m.cg,within=pe.NonNegativeReals,bounds=(0,1))
#Investment Status (Solar)
if invest:
m.xs = pe.Var(m.cs,within=pe.Binary)
else:
m.xs = pe.Var(m.cs,within=pe.NonNegativeReals,bounds=(0,1))
#Investment Status (Wind)
if invest:
m.xw = pe.Var(m.cw,within=pe.Binary)
else:
m.xw = pe.Var(m.cw,within=pe.NonNegativeReals,bounds=(0,1))
#Investment Status (Battery)
if invest:
m.xb = pe.Var(m.cb,within=pe.Binary)
else:
m.xb = pe.Var(m.cb,within=pe.NonNegativeReals,bounds=(0,1))
else:
m.xg = pe.Var(m.cg,within=pe.NonNegativeReals,bounds=(0,0))
m.xs = pe.Var(m.cs,within=pe.NonNegativeReals,bounds=(0,0))
m.xw = pe.Var(m.cw,within=pe.NonNegativeReals,bounds=(0,0))
m.xb = pe.Var(m.cb,within=pe.NonNegativeReals,bounds=(0,0))
#Objective Function
def obj_rule(m):
return m.z
m.obj = pe.Objective(rule=obj_rule)
#Definition Cost
def cost_def_rule(m):
return m.z == m.inv + m.opr
m.cost_def = pe.Constraint(rule=cost_def_rule)
#Investment Cost
def inv_cost_def_rule(m):
return m.inv == self.sf*sum(self.cgen['icost'][cg]*self.cgen['pmax'][cg]*m.xg[cg] for cg in m.cg) + \
self.sf*sum(self.csol['icost'][cs]*self.csol['pmax'][cs]*m.xs[cs] for cs in m.cs) + \
self.sf*sum(self.cwin['icost'][cw]*self.cwin['pmax'][cw]*m.xw[cw] for cw in m.cw) + \
self.sf*sum(self.cbat['icost'][cb]*self.cbat['pmax'][cb]*m.xb[cb] for cb in m.cb)
m.inv_cost_def = pe.Constraint(rule=inv_cost_def_rule)
#Operation Cost
def opr_cost_def_rule(m):
return m.opr == self.sf*self.sb*(sum(self.dtim['dt'][oo]*self.cgen['ocost'][cg]*m.pcg[cg,tt,oo] for cg in m.cg for tt in m.tt for oo in m.oo) + \
sum(self.dtim['dt'][oo]*self.egen['ocost'][eg]*m.peg[eg,tt,oo] for eg in m.eg for tt in m.tt for oo in m.oo) + \
sum(self.dtim['dt'][oo]*self.csol['ocost'][cs]*m.pcs[cs,tt,oo] for cs in m.cs for tt in m.tt for oo in m.oo) + \
sum(self.dtim['dt'][oo]*self.esol['ocost'][es]*m.pes[es,tt,oo] for es in m.es for tt in m.tt for oo in m.oo) + \
sum(self.dtim['dt'][oo]*self.cwin['ocost'][cw]*m.pcw[cw,tt,oo] for cw in m.cw for tt in m.tt for oo in m.oo) + \
sum(self.dtim['dt'][oo]*self.ewin['ocost'][ew]*m.pew[ew,tt,oo] for ew in m.ew for tt in m.tt for oo in m.oo) + \
sum(self.dtim['dt'][oo]*self.cds*self.pdem.iloc[tt,bb]*self.prep.iloc[tt,oo]*m.pds[bb,tt,oo] for bb in m.bb for tt in m.tt for oo in m.oo) + \
sum(self.dtim['dt'][oo]*self.css*m.pss[bb,tt,oo] for bb in m.bb for tt in m.tt for oo in m.oo)+ \
sum(self.dtim['dt'][oo]*self.cws*m.pws[bb,tt,oo] for bb in m.bb for tt in m.tt for oo in m.oo))
m.opr_cost_def = pe.Constraint(rule=opr_cost_def_rule)
#Shedding Cost
def she_cost_def_rule(m):
return m.she == self.sf*self.sb*(sum(self.dtim['dt'][oo]*self.cds*self.pdem.iloc[tt,bb]*self.prep.iloc[tt,oo]*m.pds[bb,tt,oo] for bb in m.bb for tt in m.tt for oo in m.oo) + \
sum(self.dtim['dt'][oo]*self.css*m.pss[bb,tt,oo] for bb in m.bb for tt in m.tt for oo in m.oo)+ \
sum(self.dtim['dt'][oo]*self.cws*m.pws[bb,tt,oo] for bb in m.bb for tt in m.tt for oo in m.oo))
m.she_cost_def = pe.Constraint(rule=she_cost_def_rule)
#Active Energy Balance
def act_bal_rule(m,bb,tt,oo):
return (1/self.phase)*sum(m.pcg[cg,tt,oo] for cg in m.cg if self.cgen['bus'][cg] == bb) + \
(1/self.phase)*sum(m.peg[eg,tt,oo] for eg in m.eg if self.egen['bus'][eg] == bb) + \
(1/self.phase)*sum(m.pcs[cs,tt,oo] for cs in m.cs if self.csol['bus'][cs] == bb) + \
(1/self.phase)*sum(m.pes[es,tt,oo] for es in m.es if self.esol['bus'][es] == bb) + \
(1/self.phase)*sum(m.pcw[cw,tt,oo] for cw in m.cw if self.cwin['bus'][cw] == bb) + \
(1/self.phase)*sum(m.pew[ew,tt,oo] for ew in m.ew if self.ewin['bus'][ew] == bb) + \
(1/self.phase)*sum(m.pbd[cb,tt,oo] for cb in m.cb if self.cbat['bus'][cb] == bb) - \
(1/self.phase)*sum(m.pbc[cb,tt,oo] for cb in m.cb if self.cbat['bus'][cb] == bb) + \
sum(m.pel[el,tt,oo] for el in m.el if self.elin['to'][el] == bb) == \
sum(m.pel[el,tt,oo] for el in m.el if self.elin['from'][el] == bb) + \
self.pdem.iloc[tt,bb]*self.prep.iloc[tt,oo]*1/self.phase*(1 - m.pds[bb,tt,oo]) + \
m.pss[bb,tt,oo] + m.pws[bb,tt,oo]
m.act_bal = pe.Constraint(m.bb, m.tt, m.oo, rule=act_bal_rule)
#Reactive Energy Balance
def rea_bal_rule(m,bb,tt,oo):
return (1/self.phase)*sum(m.qcg[cg,tt,oo] for cg in m.cg if self.cgen['bus'][cg] == bb) + \
(1/self.phase)*sum(m.qeg[eg,tt,oo] for eg in m.eg if self.egen['bus'][eg] == bb) + \
(1/self.phase)*sum(m.qcs[cs,tt,oo] for cs in m.cs if self.csol['bus'][cs] == bb) + \
(1/self.phase)*sum(m.qes[es,tt,oo] for es in m.es if self.esol['bus'][es] == bb) + \
(1/self.phase)*sum(m.qcw[cw,tt,oo] for cw in m.cw if self.cwin['bus'][cw] == bb) + \
(1/self.phase)*sum(m.qew[ew,tt,oo] for ew in m.ew if self.ewin['bus'][ew] == bb) + \
(1/self.phase)*sum(m.qcd[cb,tt,oo] for cb in m.cb if self.cbat['bus'][cb] == bb) + \
sum(m.qel[el,tt,oo] for el in m.el if self.elin['to'][el] == bb) == \
sum(m.qel[el,tt,oo] for el in m.el if self.elin['from'][el] == bb) + \
self.qdem.iloc[tt,bb]*self.qrep.iloc[tt,oo]*(1/self.phase)*(1 - m.pds[bb,tt,oo])
m.rea_bal = pe.Constraint(m.bb, m.tt, m.oo, rule=rea_bal_rule)
#Minimum Active Generation (Conventional)
def min_act_cgen_rule(m,cg,tt,oo):
return m.pcg[cg,tt,oo] >= m.cu[cg,tt,oo]*self.cgen['pmin'][cg]
m.min_act_cgen = pe.Constraint(m.cg, m.tt, m.oo, rule=min_act_cgen_rule)
def min_act_egen_rule(m,eg,tt,oo):
return m.peg[eg,tt,oo] >= m.eu[eg,tt,oo]*self.egen['pmin'][eg]
m.min_act_egen = pe.Constraint(m.eg, m.tt, m.oo, rule=min_act_egen_rule)
#Minimum Active Generation (Solar)
def min_act_csol_rule(m,cs,tt,oo):
return m.pcs[cs,tt,oo] >= m.xs[cs]*self.csol['pmin'][cs]
m.min_act_csol = pe.Constraint(m.cs, m.tt, m.oo, rule=min_act_csol_rule)
def min_act_esol_rule(m,es,tt,oo):
return m.pes[es,tt,oo] >= self.esol['pmin'][es]
m.min_act_esol = pe.Constraint(m.es, m.tt, m.oo, rule=min_act_esol_rule)
#Minimum Active Generation (Wind)
def min_act_cwin_rule(m,cw,tt,oo):
return m.pcw[cw,tt,oo] >= m.xw[cw]*self.cwin['pmin'][cw]
m.min_act_cwin = pe.Constraint(m.cw, m.tt, m.oo, rule=min_act_cwin_rule)
def min_act_ewin_rule(m,ew,tt,oo):
return m.pew[ew,tt,oo] >= self.ewin['pmin'][ew]
m.min_act_ewin = pe.Constraint(m.ew, m.tt, m.oo, rule=min_act_ewin_rule)
#Minimum Active Charging and Discharging (Battery)
def min_act_cbat_rule(m,cb,tt,oo):
return m.pbc[cb,tt,oo] >= m.xb[cb]*self.cbat['pmin'][cb]
m.min_act_cbat = pe.Constraint(m.cb, m.tt, m.oo, rule=min_act_cbat_rule)
def min_act_dbat_rule(m,cb,tt,oo):
return m.pbd[cb,tt,oo] >= m.xb[cb]*self.cbat['pmin'][cb]
m.min_act_dbat = pe.Constraint(m.cb, m.tt, m.oo, rule=min_act_dbat_rule)
#Maximum Active Generation (Conventional)
def max_act_cgen_rule(m,cg,tt,oo):
return m.pcg[cg,tt,oo] <= m.cu[cg,tt,oo]*self.cgen['pmax'][cg]
m.max_act_cgen = pe.Constraint(m.cg, m.tt, m.oo, rule=max_act_cgen_rule)
def max_act_egen_rule(m,eg,tt,oo):
return m.peg[eg,tt,oo] <= m.eu[eg,tt,oo]*self.egen['pmax'][eg]
m.max_act_egen = pe.Constraint(m.eg, m.tt, m.oo, rule=max_act_egen_rule)
#Maximum Active Generation (Solar)
def max_act_csol_rule(m,cs,tt,oo):
return m.pcs[cs,tt,oo] <= m.xs[cs]*self.psol.iloc[tt,oo]
m.max_act_csol = pe.Constraint(m.cs, m.tt, m.oo, rule=max_act_csol_rule)
def max_act_esol_rule(m,es,tt,oo):
return m.pes[es,tt,oo] <= self.psol.iloc[tt,oo]
m.max_act_esol = pe.Constraint(m.es, m.tt, m.oo, rule=max_act_esol_rule)
#Maximum Active Generation (Wind)
def max_act_cwin_rule(m,cw,tt,oo):
return m.pcw[cw,tt,oo] <= m.xw[cw]*self.pwin.iloc[tt,oo]
m.max_act_cwin = pe.Constraint(m.cw, m.tt, m.oo, rule=max_act_cwin_rule)
def max_act_ewin_rule(m,ew,tt,oo):
return m.pew[ew,tt,oo] <= self.pwin.iloc[tt,oo]
m.max_act_ewin = pe.Constraint(m.ew, m.tt, m.oo, rule=max_act_ewin_rule)
#Maximum Active Charging and Discharging (Battery)
def max_act_cbat_rule(m,cb,tt,oo):
return m.pbc[cb,tt,oo] <= m.xb[cb]*self.cbat['pmax'][cb]
m.max_act_cbat = pe.Constraint(m.cb, m.tt, m.oo, rule=max_act_cbat_rule)
def max_act_dbat_rule(m,cb,tt,oo):
return m.pbd[cb,tt,oo] <= m.xb[cb]*self.cbat['pmax'][cb]
m.max_act_dbat = pe.Constraint(m.cb, m.tt, m.oo, rule=max_act_dbat_rule)
#Minimum Reactive Generation (Conventional)
def min_rea_cgen_rule(m,cg,tt,oo):
return m.qcg[cg,tt,oo] >= m.cu[cg,tt,oo]*self.cgen['qmin'][cg]
m.min_rea_cgen = pe.Constraint(m.cg, m.tt, m.oo, rule=min_rea_cgen_rule)
def min_rea_egen_rule(m,eg,tt,oo):
return m.qeg[eg,tt,oo] >= m.eu[eg,tt,oo]*self.egen['qmin'][eg]
m.min_rea_egen = pe.Constraint(m.eg, m.tt, m.oo, rule=min_rea_egen_rule)
#Minimum Reactive Generation (Solar)
def min_rea_csol_rule(m,cs,tt,oo):
return m.qcs[cs,tt,oo] >= m.xs[cs]*self.csol['qmin'][cs]
m.min_rea_csol = pe.Constraint(m.cs, m.tt, m.oo, rule=min_rea_csol_rule)
def min_rea_esol_rule(m,es,tt,oo):
return m.qes[es,tt,oo] >= self.esol['qmin'][es]
m.min_rea_esol = pe.Constraint(m.es, m.tt, m.oo, rule=min_rea_esol_rule)
#Minimum Reactive Generation (Wind)
def min_rea_cwin_rule(m,cw,tt,oo):
return m.qcw[cw,tt,oo] >= m.xw[cw]*self.cwin['qmin'][cw]
m.min_rea_cwin = pe.Constraint(m.cw, m.tt, m.oo, rule=min_rea_cwin_rule)
def min_rea_ewin_rule(m,ew,tt,oo):
return m.qew[ew,tt,oo] >= self.ewin['qmin'][ew]
m.min_rea_ewin = pe.Constraint(m.ew, m.tt, m.oo, rule=min_rea_ewin_rule)
#Minimum Reactive Generation (Battery)
def min_rea_bat_rule(m,cb,tt,oo):
return m.qcd[cb,tt,oo] >= m.xb[cb]*self.cbat['qmin'][cb]
m.min_rea_bat = pe.Constraint(m.cb, m.tt, m.oo, rule=min_rea_bat_rule)
#Maximum Reactive Generation (Conventional)
def max_rea_cgen_rule(m,cg,tt,oo):
return m.qcg[cg,tt,oo] <= m.cu[cg,tt,oo]*self.cgen['qmax'][cg]
m.max_rea_cgen = pe.Constraint(m.cg, m.tt, m.oo, rule=max_rea_cgen_rule)
def max_rea_egen_rule(m,eg,tt,oo):
return m.qeg[eg,tt,oo] <= m.eu[eg,tt,oo]*self.egen['qmax'][eg]
m.max_rea_egen = pe.Constraint(m.eg, m.tt, m.oo, rule=max_rea_egen_rule)
#Maximum Reactive Generation (Solar)
def max_rea_csol_rule(m,cs,tt,oo):
return m.qcs[cs,tt,oo] <= m.xs[cs]*self.csol['qmax'][cs]
m.max_rea_csol = pe.Constraint(m.cs, m.tt, m.oo, rule=max_rea_csol_rule)
def max_rea_esol_rule(m,es,tt,oo):
return m.qes[es,tt,oo] <= self.esol['qmax'][es]
m.max_rea_esol = pe.Constraint(m.es, m.tt, m.oo, rule=max_rea_esol_rule)
#Maximum Reactive Generation (Wind)
def max_rea_cwin_rule(m,cw,tt,oo):
return m.qcw[cw,tt,oo] <= m.xw[cw]*self.cwin['qmax'][cw]
m.max_rea_cwin = pe.Constraint(m.cw, m.tt, m.oo, rule=max_rea_cwin_rule)
def max_rea_ewin_rule(m,ew,tt,oo):
return m.qew[ew,tt,oo] <= self.ewin['qmax'][ew]
m.max_rea_ewin = pe.Constraint(m.ew, m.tt, m.oo, rule=max_rea_ewin_rule)
#Maximum Reactive Generation (Battery)
def max_rea_bat_rule(m,cb,tt,oo):
return m.qcd[cb,tt,oo] <= m.xb[cb]*self.cbat['qmax'][cb]
m.max_rea_bat = pe.Constraint(m.cb, m.tt, m.oo, rule=max_rea_bat_rule)
#Minimum and Maximum Energy (Battery)
def min_eng_bat_rule(m,cb,tt,oo):
return self.cbat['eini'][cb]*m.xb[cb] + \
sum(m.pbc[cb,t,oo]*self.cbat['ec'][cb] for t in m.tt if t <= tt) - \
sum(m.pbd[cb,t,oo]/self.cbat['ed'][cb] for t in m.tt if t <= tt) >= m.xb[cb]*self.cbat['emin'][cb]
m.min_eng_bat = pe.Constraint(m.cb, m.tt, m.oo, rule=min_eng_bat_rule)
def max_eng_bat_rule(m,cb,tt,oo):
return self.cbat['eini'][cb]*m.xb[cb] + \
sum(m.pbc[cb,t,oo]*self.cbat['ec'][cb] for t in m.tt if t <= tt) - \
sum(m.pbd[cb,t,oo]/self.cbat['ed'][cb] for t in m.tt if t <= tt) <= m.xb[cb]*self.cbat['emax'][cb]
m.max_eng_bat = pe.Constraint(m.cb, m.tt, m.oo, rule=max_eng_bat_rule)
def cop_eng_bat_rule(m,cb,oo):
return sum(m.pbc[cb,t,oo]*self.cbat['ec'][cb] for t in m.tt) == \
sum(m.pbd[cb,t,oo]/self.cbat['ed'][cb] for t in m.tt)
m.cop_eng_bat = pe.Constraint(m.cb, m.oo, rule=cop_eng_bat_rule)
#Maximum Solar Shedding
def max_sol_shed_rule(m,bb,tt,oo):
return m.pss[bb,tt,oo] == (sum(m.xs[cs]*self.psol.iloc[tt,oo] for cs in m.cs if self.csol['bus'][cs] == bb ) + \
sum(self.psol.iloc[tt,oo] for es in m.es if self.esol['bus'][es] == bb )) - \
(sum(m.pcs[cs,tt,oo] for cs in m.cs if self.csol['bus'][cs] == bb) + \
sum(m.pes[es,tt,oo] for es in m.es if self.esol['bus'][es] == bb))
m.max_sol_shed = pe.Constraint(m.bb, m.tt, m.oo, rule=max_sol_shed_rule)
#Maximum Wind Shedding
def max_win_shed_rule(m,bb,tt,oo):
return m.pws[bb,tt,oo] == (sum(m.xw[cw]*self.pwin.iloc[tt,oo] for cw in m.cw if self.cwin['bus'][cw] == bb) + \
sum(self.pwin.iloc[tt,oo] for ew in m.ew if self.ewin['bus'][ew] == bb)) - \
(sum(m.pcw[cw,tt,oo] for cw in m.cw if self.cwin['bus'][cw] == bb) + \
sum(m.pew[ew,tt,oo] for ew in m.ew if self.ewin['bus'][ew] == bb))
m.max_win_shed = pe.Constraint(m.bb, m.tt, m.oo, rule=max_win_shed_rule)
#Line flow Definition
def flow_rule(m,el,tt,oo):
return (m.vol[self.elin['from'][el],tt,oo] - m.vol[self.elin['to'][el],tt,oo]) == \
self.elin['res'][el]*(m.pel[el,tt,oo]) + \
self.elin['rea'][el]*(m.qel[el,tt,oo])
m.flow = pe.Constraint(m.el, m.tt, m.oo, rule=flow_rule)
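# Note: flow_rule above is a linearised (LinDistFlow-style) voltage-drop relation,
# V_from - V_to = R*P + X*Q in per unit, which keeps the network model linear.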
#Max Active Line Flow
def max_act_eflow_rule(m,el,tt,oo):
return m.pel[el,tt,oo] <= self.elin['pmax'][el]*self.elin['ini'][el]
m.max_act_eflow = pe.Constraint(m.el, m.tt, m.oo, rule=max_act_eflow_rule)
#Min Active Line Flow
def min_act_eflow_rule(m,el,tt,oo):
return m.pel[el,tt,oo] >= -self.elin['pmax'][el]*self.elin['ini'][el]
m.min_act_eflow = pe.Constraint(m.el, m.tt, m.oo, rule=min_act_eflow_rule)
#Max Reactive Line Flow
def max_rea_eflow_rule(m,el,tt,oo):
return m.qel[el,tt,oo] <= self.elin['qmax'][el]*self.elin['ini'][el]
m.max_rea_eflow = pe.Constraint(m.el, m.tt, m.oo, rule=max_rea_eflow_rule)
#Min Reactive Line Flow
def min_rea_eflow_rule(m,el,tt,oo):
return m.qel[el,tt,oo] >= -self.elin['qmax'][el]*self.elin['ini'][el]
m.min_rea_eflow = pe.Constraint(m.el, m.tt, m.oo, rule=min_rea_eflow_rule)
#Voltage Magnitude at Reference Bus
def vol_ref_rule(m,tt,oo):
return sum(m.vol[bb,tt,oo] for bb in m.bb if bb==self.ref_bus) == 1
m.vol_ref = pe.Constraint(m.tt, m.oo, rule=vol_ref_rule)
#Investment Status
def inv_stat_rule(m,cg,tt,oo):
return m.cu[cg,tt,oo] <= m.xg[cg]
m.inv_stat = pe.Constraint(m.cg, m.tt, m.oo, rule=inv_stat_rule)
def inv_bat_rule(m):
return sum(m.xb[cb] for cb in m.cb) <= \
sum(m.xs[cs] for cs in m.cs) + \
sum(m.xw[cw] for cw in m.cw)
m.inv_bat = pe.Constraint(rule=inv_bat_rule)
#Solve the optimization problem
if solver == 'gurobi':
opt = pe.SolverFactory(solver, solver_io='python')
opt.options['threads'] = 0
opt.options['mipgap'] = 0
else:
opt = pe.SolverFactory(solver)
if neos:
os.environ['NEOS_EMAIL'] = solemail
solver_manager = pe.SolverManagerFactory('neos')
result = solver_manager.solve(m,opt=opt,symbolic_solver_labels=True,tee=True)
else:
result = opt.solve(m,symbolic_solver_labels=True,tee=True)
self.output = m
self.total = round(m.z.value,6)
self.total_inv = round(m.inv.value,6)
self.total_opr = round(m.opr.value,6)
self.xg_output = pyomo2dfinv(m.xg,m.cg).T
self.xs_output = pyomo2dfinv(m.xs,m.cs).T
self.xw_output = pyomo2dfinv(m.xw,m.cw).T
self.xb_output = pyomo2dfinv(m.xb,m.cb).T
self.cu_output = pyomo2dfoprm(m.cu,m.cg,m.tt,m.oo).T
self.eu_output = pyomo2dfoprm(m.eu,m.eg,m.tt,m.oo).T
self.pcg_output = pyomo2dfopr(m.pcg,m.cg,m.tt,m.oo).T
self.qcg_output = pyomo2dfopr(m.qcg,m.cg,m.tt,m.oo).T
self.peg_output = pyomo2dfopr(m.peg,m.eg,m.tt,m.oo).T
self.qeg_output = pyomo2dfopr(m.qeg,m.eg,m.tt,m.oo).T
self.pcs_output = pyomo2dfopr(m.pcs,m.cs,m.tt,m.oo).T
self.qcs_output = pyomo2dfopr(m.qcs,m.cs,m.tt,m.oo).T
self.pes_output = pyomo2dfopr(m.pes,m.es,m.tt,m.oo).T
self.qes_output = pyomo2dfopr(m.qes,m.es,m.tt,m.oo).T
self.pcw_output = pyomo2dfopr(m.pcw,m.cw,m.tt,m.oo).T
self.qcw_output = pyomo2dfopr(m.qcw,m.cw,m.tt,m.oo).T
self.pew_output = pyomo2dfopr(m.pew,m.ew,m.tt,m.oo).T
self.qew_output = pyomo2dfopr(m.qew,m.ew,m.tt,m.oo).T
self.pbc_output = pyomo2dfopr(m.pbc,m.cb,m.tt,m.oo).T
self.pbd_output = pyomo2dfopr(m.pbd,m.cb,m.tt,m.oo).T
self.qcd_output = pyomo2dfopr(m.qcd,m.cb,m.tt,m.oo).T
self.pds_output = pyomo2dfoprm(m.pds,m.bb,m.tt,m.oo).T
self.pss_output = pyomo2dfopr(m.pss,m.bb,m.tt,m.oo).T
self.pws_output = pyomo2dfopr(m.pws,m.bb,m.tt,m.oo).T
self.vol_output = pyomo2dfoprm(m.vol,m.bb,m.tt,m.oo).T
self.pel_output = pyomo2dfopr(m.pel,m.el,m.tt,m.oo).T
self.qel_output = pyomo2dfopr(m.qel,m.el,m.tt,m.oo).T
# Setup the results folder
self.outdir = self.inp_folder + os.sep + 'results'
if os.path.exists(self.outdir):
shutil.rmtree(self.outdir)
os.makedirs(self.outdir)
with open(self.outdir + os.sep + 'obj.csv', 'w', newline='') as csvfile:
thewriter = csv.writer(csvfile)
thewriter.writerow(['total costs', self.total])
thewriter.writerow(['total investment costs', self.total_inv])
thewriter.writerow(['total operation costs', self.total_opr])
self.xg_output.to_csv(self.outdir + os.sep + 'xg.csv', index=False)
self.xs_output.to_csv(self.outdir + os.sep + 'xs.csv', index=False)
self.xw_output.to_csv(self.outdir + os.sep + 'xw.csv', index=False)
self.xb_output.to_csv(self.outdir + os.sep + 'xb.csv', index=False)
self.cu_output.to_csv(self.outdir + os.sep + 'cu.csv', index=False)
self.eu_output.to_csv(self.outdir + os.sep + 'eu.csv', index=False)
self.pcg_output.to_csv(self.outdir + os.sep + 'pcg.csv', index=False)
self.qcg_output.to_csv(self.outdir + os.sep + 'qcg.csv', index=False)
self.peg_output.to_csv(self.outdir + os.sep + 'peg.csv', index=False)
self.qeg_output.to_csv(self.outdir + os.sep + 'qeg.csv', index=False)
self.pcs_output.to_csv(self.outdir + os.sep + 'pcs.csv',index=False)
self.qcs_output.to_csv(self.outdir + os.sep + 'qcs.csv',index=False)
self.pes_output.to_csv(self.outdir + os.sep + 'pes.csv',index=False)
self.qes_output.to_csv(self.outdir + os.sep + 'qes.csv',index=False)
self.pcw_output.to_csv(self.outdir + os.sep + 'pcw.csv',index=False)
self.qcw_output.to_csv(self.outdir + os.sep + 'qcw.csv',index=False)
self.pew_output.to_csv(self.outdir + os.sep + 'pew.csv',index=False)
self.qew_output.to_csv(self.outdir + os.sep + 'qew.csv',index=False)
self.pbc_output.to_csv(self.outdir + os.sep + 'pbc.csv',index=False)
self.pbd_output.to_csv(self.outdir + os.sep + 'pbd.csv',index=False)
self.qcd_output.to_csv(self.outdir + os.sep + 'qcd.csv',index=False)
self.pds_output.to_csv(self.outdir + os.sep + 'pds.csv',index=False)
self.pss_output.to_csv(self.outdir + os.sep + 'pss.csv',index=False)
self.pws_output.to_csv(self.outdir + os.sep + 'pws.csv',index=False)
self.vol_output.to_csv(self.outdir + os.sep + 'vol.csv',index=False)
self.pel_output.to_csv(self.outdir + os.sep + 'pel.csv',index=False)
self.qel_output.to_csv(self.outdir + os.sep + 'qel.csv',index=False)
def resCost(self):
'''Display the objective cost results.'''
if self.outdir != '' and os.path.exists(self.outdir):
display(pd.read_csv(self.outdir + os.sep + "obj.csv"))
else:
print('Need to successfully run the solve function first.')
raise RuntimeError('Run solve() successfully before requesting results.')
def resWind(self):
'''Display the Wind capacity investment results'''
if self.outdir != '' and os.path.exists(self.outdir):
cwin = pd.read_csv(self.inp_folder + os.sep + "cwin_dist.csv")
iwin = pd.read_csv(self.outdir + os.sep + "xw.csv")
cwin['Unit'] = (np.arange(1,len(iwin.columns)+1))
unit = cwin.loc[:,'Unit']
bus = np.array(cwin.loc[:,'bus'])
out_win =(((cwin.loc[:,'pmax']*round(iwin.loc[0:,].T,2))[0]).to_frame().set_index(unit)).rename(columns={0: 'Installed Capacity (kW)'})
out_win['Bus'] = bus
out_win.style
display(out_win)
else:
print('Need to successfully run the solve function first.')
raise RuntimeError('Run solve() successfully before requesting results.')
def resBat(self):
'''Display the Battery capacity investment results'''
if self.outdir != '' and os.path.exists(self.outdir):
cbat = pd.read_csv(self.inp_folder + os.sep + "cbat_dist.csv")
ibat = pd.read_csv(self.outdir + os.sep + "xb.csv")
cbat['Unit'] = (np.arange(1,len(ibat.columns)+1))
unit = cbat.loc[:,'Unit']
bus = np.array(cbat.loc[:,'bus'])
out_bat =(((cbat.loc[:,'pmax']*round(ibat.loc[0:,].T,2))[0]).to_frame().set_index(unit)).rename(columns={0: 'Installed Capacity (kW)'})
out_bat['Bus'] = bus
out_bat.style
display(out_bat)
else:
print('Need to successfully run the solve function first.')
raise RuntimeError('Run solve() successfully before requesting results.')
def resSolar(self):
'''Display the Solar capacity investment results'''
if self.outdir != '' and os.path.exists(self.outdir):
csol = pd.read_csv(self.inp_folder + os.sep + "csol_dist.csv")
isol = | pd.read_csv(self.outdir + os.sep + "xs.csv") | pandas.read_csv |
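# Illustrative usage sketch for the inosys class above, assembled from its own
# docstring examples ("wat_inv", ref_bus=260). The solver choice and the result
# calls assume a successful local GLPK run; kept commented out because the input
# folder and the pyeplan package layout are assumptions here.
# import pyeplan
# sys_inv = pyeplan.inosys("wat_inv", ref_bus=260)
# sys_inv.solve(solver='glpk', onlyopr=False, invest=True)
# sys_inv.resCost()
# sys_inv.resWind()
# sys_inv.resBat()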
from collections import defaultdict
import math
import numpy as np
import pandas as pd
import re
def get_gene_id(row):
if "gene_name" in row["attribute"]:
return row["attribute"].split("gene_name")[-1].split('"')[1]
elif ";gene=" in row["attribute"]:
return row["attribute"].split(";gene=")[-1].split(";")[0]
def modify_refnames(CI, gtf_file, stranded_library):
gtf_df = pd.read_csv(gtf_file,sep="\t",names=["seqname","source","feature","start","end","score","strand","frame","attribute"],comment="#")
gtf_df["gene_name"] = gtf_df.apply(get_gene_id, axis=1)
gtf_df = gtf_df[['seqname', 'strand','gene_name']]
gene_strand_info = gtf_df.drop_duplicates().reset_index(drop=True)
swap_names = False
CI["HIR1B"] = CI["HIR1A"]
CI_new = CI.drop_duplicates("refName_ABR1")
CI_new["geneR1A"] = CI_new["geneR1A"].fillna("")
CI_new["geneR1B"] = CI_new["geneR1B"].fillna("")
CI_new.loc[CI_new["fileTypeR1"] == "Aligned","read_strandR1B"] = CI_new[CI_new["fileTypeR1"] == "Aligned"]["read_strandR1A"]
CI_new["gene_strandR1A"] = CI_new["refName_ABR1"].str.split("|").str[0].str.split(":").str[-1]
CI_new["gene_strandR1B"] = CI_new["refName_ABR1"].str.split("|").str[1].str.split(":").str[-1]
CI_new["numgeneR1A"] = CI_new["geneR1A"].str.split(",").str.len()#.astype("Int32") # the number of overlapping genes on the R1A side
CI_new[["numgeneR1A"]] = CI_new[["numgeneR1A"]].fillna(0)
CI_new["numgeneR1B"] = CI_new["geneR1B"].str.split(",").str.len()#.astype("Int32") # the number of overlapping genes on the R1B side
CI_new[["numgeneR1B"]] = CI_new[["numgeneR1B"]].fillna(0)
weird_genes = ["SNORA","RP11","RP4-","SCARNA","DLEU2","SNORD","CTSLP2"]
for weird_gene in weird_genes:
for suff in ["A","B"]:
ind = CI_new[((CI_new["numgeneR1" + suff] > 2) & (CI_new["geneR1" + suff].str.contains(weird_gene,na=False))) | ((CI_new["numgeneR1" + suff] > 1) & ~(CI_new["gene_strandR1" + suff] == "?") & (CI_new["geneR1" + suff].str.contains(weird_gene,na=False)))].index
CI_new.loc[ind,"geneR1" + suff] = CI_new.loc[ind,"geneR1" + suff].str.replace("{}[^,]*[,]".format(weird_gene),"",regex=True).str.replace(",{}.*".format(weird_gene),"")
CI_new.loc[ind,"numgeneR1" + suff] = CI_new.loc[ind,"geneR1" + suff].str.split(",").str.len()#.astype("Int32")
CI_new["shared_gene"] = [",".join([x for x in a.split(",") if x in b.split(",")]) for a,b in zip(CI_new["geneR1A"],CI_new["geneR1B"])]
CI_new["num_shared_genes"] = CI_new["shared_gene"].str.split(",").str.len()
CI_new.loc[CI_new["shared_gene"] == "","num_shared_genes"] = 0
ind = CI_new[(CI_new["num_shared_genes"] > 0) & ((CI_new["numgeneR1A"] > 1) | (CI_new["numgeneR1B"] > 1))].index
CI_new.loc[ind,"geneR1A"] = CI_new.loc[ind]["shared_gene"].str.split(",").str[-1]
CI_new.loc[ind,"geneR1B"] = CI_new.loc[ind]["shared_gene"].str.split(",").str[-1]
CI_new["geneR1A_uniq"] = CI_new["geneR1A"]
CI_new["geneR1B_uniq"] = CI_new["geneR1B"]
ind = CI_new[(CI_new["numgeneR1A"] > 1) & (CI_new["num_shared_genes"] == 0)].index
CI_new.loc[ind,"geneR1A_uniq"] = CI_new.loc[ind]["geneR1A"].str.split(",").str[-1]
ind = CI_new[(CI_new["numgeneR1B"] > 1) & (CI_new["num_shared_genes"] == 0)].index
CI_new.loc[ind,"geneR1B_uniq"] = CI_new.loc[ind]["geneR1B"].str.split(",").str[-1]
for let in ["A","B"]:
CI_new = CI_new.merge(gene_strand_info,how="left",left_on = ["geneR1{}_uniq".format(let),"chrR1{}".format(let)], right_on=["gene_name","seqname"])
CI_new = CI_new.rename(columns={"strand" : "gene_strandR1{}_new".format(let)})
CI_new = CI_new.drop(["gene_name","seqname"],axis=1)
# if the library is stranded, we want to keep the read strand; the genes should all come from that strand as well (when not, it seems to be due to strand ambiguity, i.e. the gene appears on both strands)
if stranded_library:
for let in ["A","B"]:
CI_new["gene_strandR1{}_new".format(let)] = CI_new["read_strandR1{}".format(let)]
ind = CI_new[((((CI_new["gene_strandR1A_new"] != CI_new["read_strandR1A"]) & (CI_new["gene_strandR1B_new"] == CI_new["read_strandR1B"])) | ((CI_new["gene_strandR1A_new"] == CI_new["read_strandR1A"]) & (~CI_new["gene_strandR1B_new"].isna()) & (CI_new["gene_strandR1B_new"] != CI_new["read_strandR1B"]))) & (CI_new["gene_strandR1A"] == "?") & (CI_new["num_shared_genes"] == 0)) & (CI_new["numgeneR1A"] > 1)].index
CI_new.loc[ind,"geneR1A_uniq"] = CI_new.loc[ind]["geneR1A"].str.split(",").str[-2]
CI_new = CI_new.drop(["gene_strandR1A_new"],axis=1)
CI_new = CI_new.merge(gene_strand_info,how="left",left_on = ["geneR1A_uniq","chrR1A"], right_on=["gene_name","seqname"])
CI_new = CI_new.drop(["gene_name","seqname"],axis=1)
CI_new = CI_new.rename(columns={"strand" : "gene_strandR1A_new"})
ind = CI_new[(((CI_new["gene_strandR1A_new"] != CI_new["read_strandR1A"]) & (~CI_new["gene_strandR1A_new"].isna()) & (CI_new["gene_strandR1B_new"] == CI_new["read_strandR1B"])) | ((CI_new["gene_strandR1A_new"] == CI_new["read_strandR1A"]) & (CI_new["gene_strandR1B_new"] != CI_new["read_strandR1B"]))) & (CI_new["gene_strandR1B"] == "?") & (CI_new["num_shared_genes"] == 0) & (CI_new["numgeneR1B"] > 1)].index
CI_new.loc[ind,"geneR1B_uniq"] = CI_new.loc[ind]["geneR1B"].str.split(",").str[-2]
CI_new = CI_new.drop(["gene_strandR1B_new"],axis=1)
CI_new = CI_new.merge(gene_strand_info,how="left",left_on = ["geneR1B_uniq","chrR1B"], right_on=["gene_name","seqname"])
CI_new = CI_new.rename(columns={"strand" : "gene_strandR1B_new"})
CI_new = CI_new.drop(["gene_name","seqname"],axis=1)
if stranded_library:
for let in ["A","B"]:
CI_new["gene_strandR1{}_new".format(let)] = CI_new["read_strandR1{}".format(let)]
reverse = {"+" : "-", "-" : "+"}
same = {"-" : "-", "+" : "+"}
ind = CI_new[(CI_new["gene_strandR1B_new"].isna()) & (CI_new["gene_strandR1A_new"] == CI_new["read_strandR1A"])].index
CI_new.loc[ind,"gene_strandR1B_new"] = CI_new.loc[ind]["read_strandR1B"].map(same)
ind = CI_new[(CI_new["gene_strandR1B_new"].isna()) & (CI_new["gene_strandR1A_new"] != CI_new["read_strandR1A"]) & (~CI_new["gene_strandR1A_new"].isna())].index
CI_new.loc[ind,"gene_strandR1B_new"] = CI_new.loc[ind]["read_strandR1B"].map(reverse)
ind = CI_new[(CI_new["gene_strandR1A_new"].isna()) & (CI_new["gene_strandR1B_new"] == CI_new["read_strandR1B"])].index
CI_new.loc[ind,"gene_strandR1A_new"] = CI_new.loc[ind]["read_strandR1A"].map(same)
ind = CI_new[(CI_new["gene_strandR1A_new"].isna()) & (CI_new["gene_strandR1B_new"] != CI_new["read_strandR1B"]) & (~CI_new["gene_strandR1B_new"].isna())].index
CI_new.loc[ind,"gene_strandR1A_new"] = CI_new.loc[ind]["read_strandR1A"].map(reverse)
CI_new["refName_newR1"] = ""
CI_new["geneR1B_uniq"].fillna("",inplace=True)
CI_new["geneR1A_uniq"].fillna("",inplace=True)
CI_new["reverse"] = False
ind = CI_new[(CI_new["fileTypeR1"] == "Aligned") & (CI_new["gene_strandR1A_new"] == "-") & (CI_new["juncPosR1A"] < CI_new["juncPosR1B"])].index
CI_new.loc[ind,"refName_newR1"] = CI_new.loc[ind]["chrR1B"] + ":" + CI_new.loc[ind]["geneR1B_uniq"].astype(str) + ":" + CI_new.loc[ind]["juncPosR1B"].astype(str) + ":" + CI_new.loc[ind]["gene_strandR1B_new"] + "|" + CI_new.loc[ind]["chrR1A"] + ":" + CI_new.loc[ind]["geneR1A_uniq"].astype(str) + ":" + CI_new.loc[ind]["juncPosR1A"].astype(str) + ":" + CI_new.loc[ind]["gene_strandR1A_new"]
CI_new.loc[ind,"reverse"] = True
name_swap = {}
for c in CI_new.columns:
if "R1A" in c:
name_swap[c] = c.replace("R1A","R1B")
name_swap[c.replace("R1A","R1B")] = c
if swap_names:
CI_new.loc[ind] = CI_new.loc[ind].rename(columns=name_swap)
ind = CI_new[(CI_new["fileTypeR1"] == "Aligned") & (CI_new["gene_strandR1A_new"] == "+")].index
CI_new.loc[ind,"refName_newR1"] = CI_new.loc[ind]["chrR1A"] + ":" + CI_new.loc[ind]["geneR1A_uniq"] + ":" + CI_new.loc[ind]["juncPosR1A"].astype(str) + ":" + CI_new.loc[ind]["gene_strandR1A_new"] + "|" + CI_new.loc[ind]["chrR1B"] + ":" + CI_new.loc[ind]["geneR1B_uniq"] + ":" + CI_new.loc[ind]["juncPosR1B"].astype(str) + ":" + CI_new.loc[ind]["gene_strandR1B_new"]
ind = CI_new[(CI_new["fileTypeR1"] == "Chimeric") & (CI_new["gene_strandR1A_new"] != CI_new["read_strandR1A"]) & (CI_new["gene_strandR1B_new"] != CI_new["read_strandR1B"])].index
CI_new.loc[ind,"refName_newR1"] = CI_new.loc[ind]["chrR1B"] + ":" + CI_new.loc[ind]["geneR1B_uniq"] + ":" + CI_new.loc[ind]["juncPosR1B"].astype(str) + ":" + CI_new.loc[ind]["gene_strandR1B_new"] + "|" + CI_new.loc[ind]["chrR1A"] + ":" + CI_new.loc[ind]["geneR1A_uniq"] + ":" + CI_new.loc[ind]["juncPosR1A"].astype(str) + ":" + CI_new.loc[ind]["gene_strandR1A_new"]
CI_new.loc[ind,"reverse"] = True
if swap_names:
CI_new.loc[ind] = CI_new.loc[ind].rename(columns=name_swap)
ind = CI_new[(CI_new["fileTypeR1"] == "Chimeric") & ((CI_new["gene_strandR1A_new"] == CI_new["read_strandR1A"]) | (CI_new["gene_strandR1B_new"] == CI_new["read_strandR1B"]))].index
CI_new.loc[ind,"refName_newR1"] = CI_new.loc[ind]["chrR1A"] + ":" + CI_new.loc[ind]["geneR1A_uniq"].astype(str) + ":" + CI_new.loc[ind]["juncPosR1A"].astype(str) + ":" + CI_new.loc[ind]["gene_strandR1A_new"] + "|" + CI_new.loc[ind]["chrR1B"] + ":" + CI_new.loc[ind]["geneR1B_uniq"].astype(str) + ":" + CI_new.loc[ind]["juncPosR1B"].astype(str) + ":" + CI_new.loc[ind]["gene_strandR1B_new"]
ind1 = CI_new[(CI_new["refName_newR1"] == "") | (CI_new["refName_newR1"].isna())].index # this ind1 is used to simply replace refName_newR1 with the refName_ABR1
CI_new.loc[ind1,"refName_newR1"] = CI_new.loc[ind1]["refName_ABR1"]
ref_dict = pd.Series(CI_new.refName_newR1.values,index=CI_new.refName_ABR1).to_dict()
rev_dict = | pd.Series(CI_new.reverse.values,index=CI_new.refName_ABR1) | pandas.Series |
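# Illustrative sketch of the likely continuation (an assumption, not original
# code): mapping the rebuilt reference names and the reverse flag back onto the
# full junction table via the two lookups constructed above.
CI["refName_newR1"] = CI["refName_ABR1"].map(ref_dict)
CI["reverse"] = CI["refName_ABR1"].map(rev_dict)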
"""
Counterfactual explanations for scorecard models.
"""
# <NAME> <<EMAIL>>
# Copyright (C) 2021
import numbers
import time
import numpy as np
import pandas as pd
from ...information import solver_statistics
from ...logging import Logger
from ..scorecard import Scorecard
from .base import BaseCounterfactual
from .counterfactual_information import print_counterfactual_information
from .mip import CFMIP
from .model_data import model_data
from .multi_mip import MCFMIP
from .problem_data import problem_data
logger = Logger(__name__).logger
OBJECTIVES = ("proximity", "closeness")
HARD_CONSTRAINTS = {
"binary": ("diversity_features", "diversity_values"),
"probability": ("diversity_features", "diversity_values",
"min_outcome", "max_outcome"),
"continuous": ("diversity_features", "diversity_values",
"min_outcome", "max_outcome")
}
SOFT_CONSTRAINTS = {
"binary": ("diversity_features", "diversity_values"),
"probability": ("diversity_features", "diversity_values", "diff_outcome"),
"continuous": ("diversity_features", "diversity_values", "diff_outcome")
}
def _check_parameters(scorecard, special_missing, n_jobs, verbose):
# Check scorecard
if not isinstance(scorecard, Scorecard):
raise TypeError("scorecard must be a Scorecard instance.")
scorecard._check_is_fitted()
if not isinstance(special_missing, bool):
raise TypeError("special_missing must be a boolean; got {}."
.format(special_missing))
if not isinstance(n_jobs, numbers.Integral) or n_jobs <= 0:
raise ValueError("n_jobs must be a positive integer; got {}."
.format(n_jobs))
if not isinstance(verbose, bool):
raise TypeError("verbose must be a boolean; got {}.".format(verbose))
def _check_generate_params(query, y, outcome_type, n_cf, method, objectives,
max_changes, actionable_features, hard_constraints,
soft_constraints, variable_names, target_dtype):
# Check query
if not isinstance(query, (dict, pd.DataFrame)):
raise TypeError("query must be a dict or a pandas.DataFrame.")
# Check target
if not isinstance(y, numbers.Number):
raise TypeError("y must be numeric.")
# Check target and outcome type
if target_dtype == "binary":
if outcome_type not in ("binary", "probability"):
raise ValueError("outcome_type must either binary or probability "
"if target_dtype=binary; got {}."
.format(outcome_type))
elif outcome_type == "binary" and y not in [0, 1]:
raise ValueError("y must be either 0 or 1 if outcome_type=binary; "
"got {}.".format(y))
elif outcome_type == "probability" and not 0 <= y <= 1:
raise ValueError("y must be in [0, 1] if outcome_type=probability "
"; got {}.".format(y))
elif target_dtype == "continuous":
if outcome_type != "continuous":
raise ValueError("outcome_type must be continuous if "
"target_dtype=continuous; got {}."
.format(outcome_type))
# Check number of counterfactuals
if not isinstance(n_cf, numbers.Integral) or n_cf <= 0:
raise ValueError("n_cf must be a positive integer; got {}."
.format(n_cf))
if max_changes is not None:
if not isinstance(max_changes, numbers.Integral) or max_changes <= 0:
raise ValueError("max_changes must be a positive integer; got {}."
.format(max_changes))
# Check actionable features
if actionable_features is not None:
if not isinstance(actionable_features, (list, np.ndarray)):
raise TypeError("actionable_features must be either a list or "
"a numpy.ndarray.")
for av in actionable_features:
if av not in variable_names:
raise ValueError("actionable feature {} is not in {}."
.format(av, variable_names))
# Check method and constraints
_check_objectives_method_constraints(
method, objectives, hard_constraints, soft_constraints,
outcome_type)
def _check_objectives_method_constraints(method, objectives, hard_constraints,
soft_constraints, outcome_type):
# Check types
if method not in ("weighted", "hierarchical"):
raise ValueError('Invalid value for method. Allowed string values are '
'"weighted" and "hierarchical".')
if objectives is not None:
if not isinstance(objectives, dict):
raise TypeError("objectives must be a dict.")
if not len(objectives):
raise ValueError("objectives cannot be empty.")
for obj, value in objectives.items():
if obj not in OBJECTIVES:
raise ValueError("objective names must be in {}; got {}."
.format(OBJECTIVES, obj))
elif not isinstance(value, numbers.Number) or value <= 0:
raise ValueError("objective values must be positive; got {}."
.format({obj, value}))
if hard_constraints is not None:
if not isinstance(hard_constraints, (list, tuple, np.ndarray)):
raise TypeError("hard_constraints must a list, tuple or "
"numpy.ndarray.")
if len(hard_constraints) != len(set(hard_constraints)):
raise ValueError("hard_constraints cannot be repeated.")
for hc in hard_constraints:
if hc not in HARD_CONSTRAINTS[outcome_type]:
raise ValueError(
"Invalid hard constraint for outcome_type={}. Allowed "
"strings values are {}.".format(
outcome_type, HARD_CONSTRAINTS[outcome_type]))
if soft_constraints is not None:
if not isinstance(soft_constraints, dict):
raise TypeError("soft_constraints must be a dict.")
for sc, value in soft_constraints.items():
if sc not in SOFT_CONSTRAINTS[outcome_type]:
raise ValueError(
"Invalid soft constraint for outcome_type={}. Allowed "
"string values are {}.".format(
outcome_type, SOFT_CONSTRAINTS[outcome_type]))
elif not isinstance(value, numbers.Number) or value <= 0:
raise ValueError("soft constraint values must be positive; "
"got {}.".format({sc, value}))
# Check combination of hard and soft constraints for outcome type
# probability and continuous. Al least one of:
# [min_outcome, max_outcome, diff_outcome]
# must be included.
if outcome_type in ("probability", "continuous"):
if hard_constraints is None and soft_constraints is None:
raise ValueError("If outcome_type is either probability or "
"continuous, at least one hard constraint or"
"soft constraint must be provided.")
# check number of suitable constraints
_scons = ("min_outcome", "max_outcome", "diff_outcome")
_hard = list(hard_constraints) if hard_constraints is not None else []
_soft = list(soft_constraints) if soft_constraints is not None else []
_hard_soft = np.array(_hard + _soft)
_selected = np.array([c in _scons for c in _hard_soft])
n_selected = np.count_nonzero(_selected)
if n_selected == 0:
raise ValueError('If outcome_type={}, at least one of the '
'hard_constraints "min_outcome", "max_outcome" '
'or the soft_constraint "diff_outcome" must be '
'selected.'.format(outcome_type))
class Counterfactual(BaseCounterfactual):
"""Optimal counterfactual explanations given a scorecard model.
Parameters
----------
scorecard : object
A ``Scorecard`` instance.
special_missing : bool (default=False)
Whether the special and missing bins are considered as valid
counterfactual values.
n_jobs : int, optional (default=1)
Number of cores to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
"""
def __init__(self, scorecard, special_missing=False, n_jobs=1,
verbose=False):
self.scorecard = scorecard
self.special_missing = special_missing
self.n_jobs = n_jobs
self.verbose = verbose
# auxiliary
self._cfs = None
# info
self._optimizer = None
self._status = None
# timing
self._time_fit = None
self._time_solver = None
self._time_postprocessing = None
# flags
self._is_fitted = False
self._is_generated = False
def fit(self, X):
"""Fit counterfactual. Compute problem data to generate counterfactual
explanations.
Parameters
----------
X : pandas.DataFrame (n_samples, n_features)
Training vector, where n_samples is the number of samples.
Returns
-------
self : Counterfactual
Fitted counterfactual.
"""
time_init = time.perf_counter()
if self.verbose:
logger.info("Counterfactual fit started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params(deep=False))
if not isinstance(X, pd.DataFrame):
raise TypeError("X must be a pandas.DataFrame.")
# Scorecard selected variables
self._variable_names = self.scorecard.binning_process_.get_support(
names=True)
for v in self._variable_names:
if v not in X.columns:
raise ValueError("Variable {} not in X. X must include {}."
.format(v, self._variable_names))
if self.verbose:
logger.info("Compute optimization problem data.")
# Problem data
intercept, coef, min_p, max_p, wrange, F, mu = problem_data(
self.scorecard, X[self._variable_names])
self._intercept = intercept
self._coef = coef
self._min_p = min_p
self._max_p = max_p
self._wrange = wrange
self._F = F
self._mu = mu
self._time_fit = time.perf_counter() - time_init
if self.verbose:
logger.info("Counterfactual fit terminated. Time: {:.4f}s"
.format(self._time_fit))
self._is_fitted = True
self._is_generated = False
return self
def information(self, print_level=1):
"""Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_generated()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
if self._optimizer is not None:
solver, _ = solver_statistics("mip", self._optimizer.solver_)
objectives = self._optimizer._objectives
time_solver = self._time_solver
else:
solver = None
objectives = None
time_solver = 0
time_total = self._time_fit + time_solver + self._time_postprocessing
dict_user_options = self.get_params(deep=False)
print_counterfactual_information(
print_level, self._status, solver, objectives, time_total,
self._time_fit, time_solver, self._time_postprocessing,
dict_user_options)
def generate(self, query, y, outcome_type, n_cf, method="weighted",
objectives=None, max_changes=None, actionable_features=None,
hard_constraints=None, soft_constraints=None,
priority_tol=0.1, time_limit=10):
"""Generate counterfactual explanations given objectives and
constraints.
Parameters
----------
query : dict or pandas.DataFrame
Input data points for which a single or multiple counterfactual
explanations are to be generated.
y : int or float
Desired outcome.
outcome_type : str
Desired outcome type. Supported outcome types are "binary",
"probability" and "continuous".
n_cf : int
Number of counterfactuals to be generated.
method : str (default="weighted")
Multi-objective optimization method. Supported methods are
"weighted" and "hierarchical".
objectives : dict or None (default=None)
Objectives with their corresponding weights or priorities,
depending on the method.
max_changes : int or None (default=None)
Maximum number of features to be changed. If None, the maximum
number of changes is half of the number of features.
actionable_features : array-like or None (default=None)
List of actionable features. If None, all features are suitable to
be changed.
hard_constraints : array-like or None (default=None)
Constraints to be enforced when solving the underlying optimization
problem.
soft_constraints : dict or None (default=None)
Constraints to be moved to the objective function as a penalization
term.
priority_tol : float, optional (default=0.1)
Relative tolerance when solving the multi-objective optimization
problem with ``method="hierarchical"``.
time_limit : int (default=10)
The maximum time in seconds to run the optimization solver.
Returns
-------
self : Counterfactual
Generated counterfactuals.
"""
time_init = time.perf_counter()
self._check_is_fitted()
if self.verbose:
logger.info("Counterfactual generation started.")
logger.info("Options: check parameters.")
# Check parameters
_check_generate_params(
query, y, outcome_type, n_cf, method, objectives, max_changes,
actionable_features, hard_constraints, soft_constraints,
self._variable_names, self.scorecard._target_dtype)
# Check priority tolerance
if (not isinstance(priority_tol, numbers.Number) or
not 0 <= priority_tol <= 1):
raise ValueError("priority_tol must be in [0, 1]; got {}."
.format(priority_tol))
# Check time limit
if not isinstance(time_limit, numbers.Number) or time_limit <= 0:
raise ValueError("time_limit must be a positive value in seconds; "
"got {}.".format(time_limit))
# Transform query using scorecard binning process
x, query = self._transform_query(query)
if self.verbose:
logger.info("Options: check objectives and constraints.")
# Set default objectives
if objectives is None:
if method == "weighted":
_objectives = dict(zip(OBJECTIVES, (1, 1)))
else:
_objectives = dict(zip(OBJECTIVES, (2, 1)))
else:
_objectives = objectives
# Set max changes
if max_changes is None:
_max_changes = len(self._variable_names) // 2
else:
_max_changes = max_changes
# Clean constraints given the number of counterfactuals
_hard_constraints, _soft_constraints = self._prepare_constraints(
outcome_type, n_cf, hard_constraints, soft_constraints)
# Indices of non actionable features
non_actionable = self._non_actionable_indices(actionable_features)
# Optimization problem
if self.verbose:
logger.info("Optimizer started.")
time_solver = time.perf_counter()
if n_cf == 1:
optimizer = CFMIP(method, _objectives, _max_changes,
non_actionable, _hard_constraints,
_soft_constraints, priority_tol, self.n_jobs,
time_limit)
else:
optimizer = MCFMIP(n_cf, method, _objectives, _max_changes,
non_actionable, _hard_constraints,
_soft_constraints, priority_tol, self.n_jobs,
time_limit)
# Problem data. Indices is required to construct counterfactual
if self.verbose:
logger.info("Optimizer: build model...")
nbins, metric, indices = model_data(
self.scorecard, x, self.special_missing)
optimizer.build_model(self.scorecard, x, y, outcome_type,
self._intercept, self._coef, self._min_p,
self._max_p, self._wrange, self._F, self._mu,
nbins, metric)
# Optimization
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._status = status
self._optimizer = optimizer
self._time_solver = time.perf_counter() - time_solver
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
time_postprocessing = time.perf_counter()
if status in ("OPTIMAL", "FEASIBLE"):
cfs = []
sc = self.scorecard.table()
if n_cf == 1:
new_indices, new_query, score = self._get_counterfactual(
query, sc, x, nbins, metric, indices, solution)
cfs.append({"outcome_type": outcome_type,
"query": new_query,
"score": score,
"features": new_indices.keys()})
else:
for k in range(n_cf):
new_indices, new_query, score = self._get_counterfactual(
query, sc, x, nbins, metric, indices, solution[k])
cfs.append({"outcome_type": outcome_type,
"query": new_query,
"score": score,
"features": new_indices.keys()})
else:
cfs = None
self._cfs = cfs
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Counterfactual generation terminated. Status: {}. "
"Time: {:.4f}s".format(self._status, self._time_total))
# Completed successfully
self._is_generated = True
return self
def display(self, show_only_changes=False, show_outcome=False):
'''Display the generated counterfactual explanations.
Parameters
----------
show_only_changes : boolean (default=False)
Whether to show only changes on feature values.
show_outcome : boolean (default=False)
Whether to add a column with the scorecard outcome. If
``outcome_type`` is "binary" or "probability", the estimated
probability of the counterfactual is added.
Returns
-------
counterfactuals : pandas.DataFrame
Counterfactual explanations.
"""
self._check_is_generated()
self._check_counterfactual_is_found()
if not isinstance(show_only_changes, bool):
raise TypeError("show_only_changes must be a boolean; got {}."
.format(show_only_changes))
if not isinstance(show_outcome, bool):
raise TypeError("show_outcome must be a boolean; got {}."
.format(show_outcome))
cf_queries = []
for cf in self._cfs:
cf_query = cf["query"].copy()
if show_only_changes:
cf_features = cf["features"]
for v in cf_query.columns:
if v not in cf_features:
cf_query[v] = "-"
if show_outcome:
outcome_type = cf["outcome_type"]
if outcome_type == "continuous":
cf_query["outcome"] = cf["score"]
else:
cf_score = cf["score"]
cf_query["outcome"] = 1.0 / (1.0 + np.exp(-cf_score))
cf_queries.append(cf_query)
return pd.concat(cf_queries)
def _get_counterfactual(self, query, sc, x, nbins, metric, indices,
solution):
new_indices = {}
score = 0
for i, v in enumerate(self._variable_names):
new_index = np.array(indices[i])[solution[i]]
if len(new_index):
new_indices[v] = new_index
new_metric = x[i] + np.sum(
[(metric[i][j] - x[i]) * solution[i][j]
for j in range(nbins[i])])
score += self._coef[i] * new_metric
score += self._intercept
new_query = query.copy()
for v, index in new_indices.items():
new_query[v] = sc[sc["Variable"] == v]["Bin"][index].values
return new_indices, new_query, score
def _transform_query(self, query):
if isinstance(query, dict):
query = | pd.DataFrame.from_dict(query, orient="index") | pandas.DataFrame.from_dict |
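# Illustrative usage sketch assembled from the docstrings above; the scorecard,
# the training frame X and the query row are placeholders (assumptions), so the
# calls are kept commented out.
# cf = Counterfactual(scorecard=scorecard, special_missing=False, n_jobs=1)
# cf.fit(X)
# cf.generate(query=X.iloc[[0]], y=0, outcome_type="binary", n_cf=3,
#             method="weighted", max_changes=3,
#             hard_constraints=["diversity_features"])
# cf.display(show_only_changes=True, show_outcome=True)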
# -*- coding: UTF-8 -*-
import pandas as pd
import tushare as ts
import matplotlib.pyplot as plt
df = ts.get_k_data("000799", start="2018-01-01", end="2018-06-29")
print (df.head())
print (df.tail())
df.index = | pd.to_datetime(df.date) | pandas.to_datetime |
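# Illustrative continuation (assumed, not from the original script): overlay 5- and
# 20-day moving averages on the closing prices fetched above. "close" follows the
# tushare get_k_data column naming; the window sizes are arbitrary choices.
df["ma5"] = df["close"].rolling(window=5).mean()
df["ma20"] = df["close"].rolling(window=20).mean()
df[["close", "ma5", "ma20"]].plot()
plt.show()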
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
| Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T') | pandas.Timestamp |
"""
Compare COVID-19 simulation outputs to data.
Estimate Rt using epyestim
"""
import argparse
import os
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import sys
import matplotlib.dates as mdates
import epyestim
import epyestim.covid19 as covid19
import seaborn as sns
from estimate_Rt_trajectores import *
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
mpl.rcParams['pdf.fonttype'] = 42
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-stem",
"--stem",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default="Local"
)
parser.add_argument(
"--plot_only",
action='store_true',
help="If specified only Rt plots will be generated, given Rt was already estimated",
)
parser.add_argument(
"--use_pre_aggr",
action='store_true',
help="If specified uses pre-aggregated new_infections instead of new_infections per trajectory to estimate Rt",
)
return parser.parse_args()
def run_Rt_estimation(exp_name,grp_numbers,smoothing_window, r_window_size):
"""
Rt estimation using median new_infections, aggregated from the trajectoriesDat.csv
Code following online example:
https://github.com/lo-hfk/epyestim/blob/main/notebooks/covid_tutorial.ipynb
smoothing_window of 28 days was found to be most comparable to EpiEstim in this case
r_window_size defaults to 3 if not specified; increasing r_window_size narrows the uncertainty bounds
"""
simdate = exp_name.split("_")[0]
df = pd.read_csv(os.path.join(exp_dir, f'nu_{simdate}.csv'))
df['date'] = pd.to_datetime(df['date'])
df = df[(df['date'] > pd.Timestamp('2020-03-01'))]
df_rt_all = pd.DataFrame()
for ems_nr in grp_numbers:
if ems_nr == 0:
region_suffix = "illinois"
else:
region_suffix = f'covidregion_{str(ems_nr)}'
if region_suffix not in df["geography_modeled"].unique():
continue
mdf = df[df['geography_modeled'] == region_suffix]
mdf = mdf.set_index('date')['cases_new_median']
"""Use default distributions (for covid-19)"""
si_distrb, delay_distrb = get_distributions(show_plot=False)
df_rt = covid19.r_covid(mdf[:-1], smoothing_window=smoothing_window, r_window_size=r_window_size)
df_rt['geography_modeled'] = region_suffix
df_rt.reset_index(inplace=True)
df_rt = df_rt.rename(columns={'index': 'date',
'Q0.5': 'rt_median',
'Q0.025': 'rt_lower',
'Q0.975': 'rt_upper'})
df_rt['model_date'] = pd.Timestamp(simdate)
df_rt = df_rt[['model_date', 'date', 'geography_modeled', 'rt_median', 'rt_lower', 'rt_upper']]
# df_rt['smoothing_window'] =smoothing_window
# df_rt['r_window_size'] = r_window_size
df_rt_all = df_rt_all.append(df_rt)
df_rt_all['rt_pre_aggr'] = use_pre_aggr
df_rt_all.to_csv(os.path.join(exp_dir, 'rtNU.csv'), index=False)
if not 'rt_median' in df.columns:
df['date'] = pd.to_datetime(df['date'])
df_rt_all['date'] = pd.to_datetime(df_rt_all['date'])
df_with_rt = pd.merge(how='left', left=df, right=df_rt_all,
left_on=['date', 'geography_modeled'],
right_on=['date', 'geography_modeled'])
df_with_rt.to_csv(os.path.join(exp_dir, f'nu_{simdate}.csv'), index=False)
else:
print("Warning: Overwriting already present Rt estimates")
df = df.drop(['rt_median', 'rt_lower', 'rt_upper'], axis=1)
df['date'] = pd.to_datetime(df['date'])
df_rt_all['date'] = pd.to_datetime(df_rt_all['date'])
df_with_rt = pd.merge(how='left', left=df, right=df_rt_all,
left_on=['date', 'geography_modeled'],
right_on=['date', 'geography_modeled'])
df_with_rt.to_csv(os.path.join(exp_dir, f'nu_{simdate}.csv'), index=False)
return df_rt
def use_Rt_trajectories(exp_name,exp_dir,grp_numbers, min_date = None, use_pre_aggr=True ):
"""
If it exists, load rt_trajectories_aggr.csv; otherwise rerun the Rt estimation on new_infections per trajectory.
Note: estimation per trajectory may take >1 hour or may run out of memory, depending on the date and scenarios.
estimate_Rt_trajectories.py (separate script) allows running it in parallel per region on NUCLUSTER.
"""
simdate = exp_name.split("_")[0]
if min_date is None:
min_date = pd.Timestamp('2021-01-01')
if os.path.exists(os.path.join(exp_dir, 'rt_trajectories_aggr.csv')):
df = pd.read_csv(os.path.join(exp_dir, 'rt_trajectories_aggr.csv'))
df['date'] = pd.to_datetime(df['date'])
else:
run_Rt_estimation_trajectories(exp_name,exp_dir,grp_numbers, smoothing_window=28, r_window_size=3, min_date = min_date)
df = run_combine_and_plot(exp_dir,grp_numbers=grp_numbers, last_plot_day=min_date )
df.rename(columns={"CI_50": "rt_median", "amin": "rt_lower", "amax": "rt_upper"}, inplace=True)
df = df.drop(['CI_2pt5', 'CI_97pt5', 'CI_25','CI_75'], axis=1)
if df['date'].min() > pd.Timestamp('2020-03-01') :
if use_pre_aggr:
rt_name = 'rt_trajectories_aggr.csv'
rdf = pd.read_csv(os.path.join(wdir, 'simulation_saved', rt_name))
rdf.rename(columns={"CI_50": "rt_median", "amin": "rt_lower", "amax": "rt_upper"}, inplace=True)
rdf = rdf.drop(['CI_2pt5', 'CI_97pt5', 'CI_25', 'CI_75'], axis=1)
else:
rt_name = 'rtNU.csv'
rdf = pd.read_csv(os.path.join(wdir, 'simulation_saved', rt_name))
"""Read saved historical Rt estimates"""
rdf = rdf[df.columns]
rdf['date'] = pd.to_datetime(rdf['date'])
rdf = rdf[rdf['date'] < df['date'].min()]
df_rt_all = rdf.append(df)
del rdf, df
else:
df_rt_all = df
del df
df_rt_all['rt_pre_aggr'] = use_pre_aggr
df_rt_all.to_csv(os.path.join(exp_dir, 'rtNU.csv'), index=False)
""" Add to civis deliverables """
df = pd.read_csv(os.path.join(exp_dir, f'nu_{simdate}.csv'))
df['date'] = pd.to_datetime(df['date'])
df_rt_all['date'] = pd.to_datetime(df_rt_all['date'])
df_rt_all = df_rt_all.drop(['model_date'], axis=1)
if not 'rt_median' in df.columns:
df_with_rt = pd.merge(how='left', left=df, right=df_rt_all,
left_on=['date', 'geography_modeled'],
right_on=['date', 'geography_modeled'])
df_with_rt.to_csv(os.path.join(exp_dir, f'nu_{simdate}.csv'), index=False)
else:
print("Warning: Overwriting already present Rt estimates")
df = df.drop(['rt_median', 'rt_lower', 'rt_upper','rt_pre_aggr'], axis=1)
df_with_rt = pd.merge(how='left', left=df, right=df_rt_all,
left_on=['date', 'geography_modeled'],
right_on=['date', 'geography_modeled'])
df_with_rt.to_csv(os.path.join(exp_dir, f'nu_{simdate}.csv'), index=False)
if __name__ == '__main__':
test_mode = False
if test_mode:
stem = "20210423_IL_localeEMS_1_testRtnew_baseline"
Location = 'Local'
plot_only=False
use_pre_aggr= False
else:
args = parse_args()
stem = args.stem
Location = args.Location
plot_only = args.plot_only
use_pre_aggr = args.use_pre_aggr
first_plot_day = | pd.Timestamp('2020-03-01') | pandas.Timestamp |
from ms_learn_crawler import *
import calendar
import time
import pandas as pd
import pickle
import os
data_month = 1
data_year = 2022
f = open("portfolio.config", "r")
portfolio_urls = f.readlines()
cert_info = {}
all_cert_lp_info = pd.DataFrame()
all_cert_module_info = pd.DataFrame()
crawler = ms_learn_crawler()
## Get all the LP info for each cert
cert_lp_pickle_file_name = "../data/"+str(data_month)+"-"+str(data_year)+"-all_cert_lp_info.pkl"
if(os.path.exists(cert_lp_pickle_file_name)):
#read from file to avoid reprocessing
with open(cert_lp_pickle_file_name, 'rb') as file:
# Call load method to deserialze
all_cert_lp_info = pickle.load(file)
else:
# do the processing
for cert in portfolio_urls:
learn_uids = crawler.get_learn_paths_for_cert(cert)
if len(learn_uids)>0:
lp_metadata = crawler.get_learn_path_metadata(learn_uids)
df = pd.DataFrame(lp_metadata, columns = ['LearningPathUid', 'LiveUrl','TotalModules'])
last_slash = cert.rfind("/")
cert_name = cert[last_slash+1:]
df['Certification'] = cert_name.strip()
if all_cert_lp_info.size == 0:
all_cert_lp_info = df
else:
all_cert_lp_info = pd.concat([all_cert_lp_info,df],sort=False)
#print(all_cert_lp_info)
# Open a file and use dump()
with open(cert_lp_pickle_file_name, 'wb') as file:
# A new file will be created
pickle.dump(all_cert_lp_info, file)
print(all_cert_lp_info.describe())
input("Press Enter to continue...")
lp_data = pd.read_csv('../data/learning_path_stats-latest.csv', encoding = 'unicode_escape', engine ='python')
all_cert_lp_info = pd.merge(all_cert_lp_info, lp_data,on='LiveUrl')
all_cert_lp_info.rename(columns={'LiveUrl': 'LearningPathUrl'}, inplace=True)
all_cert_lp_info.columns.values[4] = "Title"
all_cert_lp_info['Month'] = data_month
all_cert_lp_info['Year'] = data_year
#Drop MSAuthor and GitHubAuthor columns, export w/o header
#all_cert_lp_info.drop(['MSAuthor', 'GitHubAuthor'], axis = 1)
print(all_cert_lp_info.columns)
all_cert_lp_info_final = all_cert_lp_info[["LearningPathUid_x","LearningPathUrl","TotalModules","Certification","Title","Total modules","Visitors","Page Views","LPCompletedRate","LPStarted","LPComplete","Trophies","Shared Trophies","Avg Minutes per Visitor","Bookmard Users","Duration(min)","LearningPathUid_y","Roles","Products","Levels","Month","Year"]]
all_cert_lp_info_final.to_csv('../processed_data/portfolio_cert_lp_info.csv',mode='w',header = False)
learn_path_urls = all_cert_lp_info['LearningPathUrl'].tolist()
all_cert_module_info_pickle_file_name = "../data/"+str(data_month)+"-"+str(data_year)+"-all_cert_module_info.pkl"
if(os.path.exists(all_cert_module_info_pickle_file_name)):
#read from file to avoid reprocessing
with open(all_cert_module_info_pickle_file_name, 'rb') as file:
# Call load method to deserialze
all_cert_module_info = pickle.load(file)
else:
for learn_path_url in learn_path_urls:
module_uids = crawler.get_learn_path_modules(learn_path_url)
module_metadata = crawler.get_module_metadata(module_uids)
df = | pd.DataFrame(module_metadata, columns = ['LiveUrl','Uid','Url']) | pandas.DataFrame |
import pandas as pd
import os
import gzip
import numpy as np
import h5py
from scipy.sparse import coo_matrix
from io import StringIO
def divide_name(filename):
# home-made os.path.splitext, since os.path.splitext can't handle "name.a.b.c" properly
basename = os.path.basename(filename)
parts = basename.split(".") #split return >= 1 length list
if len(parts) == 1:
return parts[0], ""
else:
return parts[0], "."+".".join(parts[1:])
def parse_pairs(filename:str)->"Cell":
'''
read from 4DN's standard .pairs format
compatible with all hickit-originated pairs-like formats
'''
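# Illustrative (assumed) .pairs header that this parser expects; the real fields
# are read from the file itself:
#   ## pairs format v1.0
#   #chromosome: chr1 248956422
#   #columns: readID chr1 pos1 chr2 pos2 strand1 strand2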
# read comments
with gzip.open(filename,"rt") as f:
comments = []
chromosomes = []
lengths = []
for line in f.readlines():
if line[0] != "#":
break
if line.startswith("#chromosome") or line.startswith("#chromsize"):
chrom, length = line.split(":")[1].strip().split()
chromosomes.append(chrom)
lengths.append(int(length))
if line.startswith("#columns:"):
columns = line.split(":")[1].strip().split()
## comment lines are stored in dataframe.attrs["comment"]
comments.append(line)
dtype_array = {"readID":"category",
"chr1":pd.CategoricalDtype(categories=chromosomes),
"pos1":"int",
"chr2": | pd.CategoricalDtype(categories=chromosomes) | pandas.CategoricalDtype |
# Written by: <NAME>, @dataoutsider
# Viz: "On the Move", enjoy!
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from math import cos, sin, pi
class point:
def __init__(self, side, region, x, y, path = -1, value = -1, state = ''):
self.side = side
self.region = region
self.x = x
self.y = y
self.path = path
self.value = value
self.state = state
def to_dict(self):
return {
'side' : self.side,
'region' : self.region,
'x' : self.x,
'y' : self.y,
'path' : self.path,
'value' : self.value,
'state' : self.state }
#region Load Data
df_states_data = pd.read_csv(os.path.dirname(__file__) + '/state_data.csv', engine='python')
df_state_id = pd.read_csv(os.path.dirname(__file__) + '/Migration_States.csv', engine='python')
df_state_id.reset_index(inplace=True)
df_state_id = df_state_id[['index', 'State']]
df_region = pd.read_csv(os.path.dirname(__file__) + '/Migration_Regions.csv', engine='python')
df_final = pd.merge(df_states_data, df_state_id, how='left', left_on='state_id', right_on='index')
df_final = pd.merge(df_final, df_state_id, how='left', left_on='state_id_source', right_on='index')
df_final = pd.merge(df_final, df_region, how='left', left_on='State_x', right_on='State')
df_final = pd.merge(df_final, df_region, how='left', left_on='State_y', right_on='State')
#df_final.to_csv(os.path.dirname(__file__) + '/Region_Connection.csv', encoding='utf-8', index=False)
df_states_in = df_final.loc[df_final['direction'] == 'in']
df_states_in = df_states_in.loc[df_final['Region_x'].notnull() & df_final['Region_y'].notnull()]
df_states_in = df_states_in[['State_x', 'Region_x', 'Diamond_x', 'State_y', 'Region_y', 'Diamond_y', 'value']]
df_states_in = df_states_in.loc[:,~df_states_in.columns.duplicated()]
df_states_out = df_final.loc[df_final['direction'] == 'out']
df_states_out = df_states_out.loc[df_final['Region_x'].notnull() & df_final['Region_y'].notnull()]
df_states_out = df_states_out[['State_x', 'Region_x', 'Diamond_x', 'State_y', 'Region_y', 'Diamond_y', 'value']]
df_states_out = df_states_out.loc[:,~df_states_out.columns.duplicated()]
df_states_in_group = df_states_in.groupby(['State_x'])['value'].sum().reset_index()
df_states_out_group = df_states_out.groupby(['State_x'])['value'].sum().reset_index()
df_final = pd.merge(df_states_in_group, df_states_out_group, how='left', on='State_x')
df_final = | pd.merge(df_final, df_region, how='left', left_on='State_x', right_on='State') | pandas.merge |
import warnings
import os
import functools
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.utils.validation import check_array
import enricher.regulon.regulon_enrichment as regulon_enrichment
import enricher.features.expression_utils as expression_utils
import enricher.regulon.regulon_utils as regulon_utils
import argparse
warnings.simplefilter("ignore", UserWarning)
test = 'test'
if __name__ == '__main__':
DATA_PATH = os.path.join(os.getcwd(), 'data')
else:
dirname = os.path.dirname(__file__)
DATA_PATH = os.path.join(dirname, 'data')
sif_file = DATA_PATH + '/PathwayCommons9.All.hgnc.sif.gz'
sec_intx_file = DATA_PATH + '/secondary_intx_regulon.pkl'
class Error(Exception):
"""Base class for other exceptions"""
class OmicError(Error):
"""Raised when duplications in omic features or samples are detected"""
class Enrichment(object):
"""Base enrichment class for predicting regulon enrichment from -omic datasets.
Args:
cohort :
expr (:obj:`pd.DataFrame`, shape = [n_feats, n_samps])
regulon (:obj: `pandas DataFrame`)
regulon_size (int): Minimum number of edges for a given regulator.
sec_intx_file (str): Path to pre-compiled secondary interaction network.
"""
def __init__(self, cohort, expr, regulon=None, regulon_size=15, sec_intx=sec_intx_file,
thresh_filter=0.1):
if not isinstance(expr, pd.DataFrame):
raise TypeError("`expr` must be a pandas DataFrame, found "
"{} instead!".format(type(expr)))
if len(set(expr.index)) != expr.shape[0]:
print(len(set(expr.index)))
print(expr.shape)
raise OmicError("Duplicate feature names in {cohort} dataset!".format(cohort=cohort))
if len(set(expr.columns)) != expr.shape[1]:
raise OmicError("Duplicate sample names in {cohort} dataset!".format(cohort=cohort))
self.cohort = cohort
self.expr = expr
if regulon is None:
self.regulon = regulon_utils.read_pickle(sec_intx)
else:
self.regulon = regulon
self.scaler_type = None
self.scaled = False
self.regulon_size = regulon_size
self.regulon_weights = None
self.thresh_filter = thresh_filter
self.total_enrichment = None
self.delta = None
self.local_enrichment = None
self.regulators = None
self.quant_nes = None
def __str__(self):
return """------\nCohort: {}\nn-features: {}\nn-samples: {}\nscaler: {}\nscaled:\
{}\nregulon threshold: {}\nregulon nodes: {}\nregulon edges: {}\n------\n""".\
format(self.cohort,
self.expr.shape[0],
self.expr.shape[1],
self.scaler_type,
self.scaled, self.regulon_size,
len(self.regulon.UpGene.unique()),
self.regulon.shape[0])
def __repr__(self):
return """------\nCohort: {}\nn-features: {}\nn-samples: {}\nscaler: {}\nscaled: {}\
\nregulon threshold: {}\nregulon nodes: {}\nregulon edges: {}\n------\n""".\
format(self.cohort,
self.expr.shape[0],
self.expr.shape[1],
self.scaler_type,
self.scaled,
self.regulon_size,
len(self.regulon.UpGene.unique()),
self.regulon.shape[0])
@staticmethod
def _preprocess_data(expr, scaler_type='robust', thresh_filter=0.1):
""" Centers expression data based on a specified data scaler algorithm
Args:
expr (pandas DataFrame obj): pandas DataFrame of [n_features, n_samples]
scaler_type (str): Scaler to normalized features/samples by:
standard | robust | minmax | quant
thresh_filter (float): Prior to normalization remove features that have
a standard deviation per feature less than {thresh_filter}
Returns:
scaled_frame (:obj: `pandas DataFrame`) : pandas DataFrame containing
scaled expression data of shape [n_samples, n_features]
"""
# By default, the input is checked to be a non-empty 2D array containing
# only finite values.
_ = check_array(expr)
scaler_opt = {'standard': expression_utils.StandardScaler(),
'robust': expression_utils.RobustScaler(),
'minmax': expression_utils.MinMaxScaler(),
'quant': expression_utils.QuantileTransformer()}
if scaler_type not in scaler_opt:
raise KeyError('{scaler_type} not supported scaler_type!'
' Supported types include: {keys}'.format(
scaler_type=scaler_type, keys=' | '.join(scaler_opt.keys())))
scaler = scaler_opt[scaler_type]
# Transpose frame to correctly orient frame for scaling and machine learning algorithms
print('--- log2 normalization ---')
expr_t = expr[(expr.std(axis=1) > thresh_filter)].T
expr_lt = expression_utils.log_norm(expr_t)
print('--- Centering features with {} scaler ---'.format(scaler_type))
scaled_frame = pd.DataFrame(scaler.fit_transform(expr_lt),
index=expr_lt.index,
columns=expr_lt.columns)
return scaled_frame
@staticmethod
def _prune_regulon(expr, regulon, regulon_size):
""" Prunes regulon with secondary interactions that do not meet
the necessary number of downstream interactions metric {regulon_size}
Args:
expr (pandas DataFrame obj): pandas DataFrame of [n_samples, n_features]
regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight
interactions between regulator and downstream members of its regulon
of shape [len(Target), ['Regulator','Target','MoA','likelihood']
regulon_size (int) : number of downstream interactions required for a
given regulator in order to calculate enrichment score
Returns:
filtered_regulon (:obj: `pandas DataFrame`) : pandas DataFrame containing weight
interactions between regulator and downstream members of its regulon of shape :
[len(Target), ['Regulator','Target','MoA','likelihood']
"""
expr_filtered_regulon = regulon[
((regulon.UpGene.isin(expr.columns)) & (regulon.DownGene.isin(expr.columns)))].\
set_index('UpGene')
idx = (expr_filtered_regulon.index.value_counts() >= regulon_size)
filtered_regulon = expr_filtered_regulon.loc[idx[idx == True].index].reset_index()
edges = list(set(filtered_regulon.UpGene) | set(filtered_regulon.DownGene))
sub_expr = expr.loc[:,edges]
return filtered_regulon, sub_expr
@staticmethod
def _structure_weights(regulator, pruned_regulon, f_statistics, r_frame, p_frame):
""" Calculates weights associated with regulators. Weights are the summation of
the F-statistic and absolute spearman correlation coefficient. The weight
retains the sign of the spearman correlation coefficient.
Args:
regulator (str): A feature to assign weights to downstream interactions
pruned_regulon (:obj:`pd.DataFrame`, shape = [n_interactions, 3]
f_statistics (dict) : Dictionary with key:{regulator} key and
r_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
p_frame (:obj:`pd.DataFrame`), shape = [n_features, n_features]
Returns:
weights_ordered (:obj:`pd.DataFrame`), shape = [n_interactions, 3]
"""
sub_regul = pruned_regulon[(pruned_regulon['UpGene'] == regulator)]
targs = sub_regul.DownGene
p_ = p_frame.loc[targs, regulator]
p_.name = 'likelihood'
f_ = f_statistics[regulator][0]
r_ = r_frame.loc[targs, regulator]
w_ = (f_ + abs(r_)) * np.sign(r_)
w_.index.name = 'Target'
w_.name = 'MoA'
weights = w_.to_frame()
weights['likelihood'] = p_
weights['Regulator'] = regulator
weights_ordered = weights.reset_index().\
reindex(['Regulator', 'Target', 'MoA', 'likelihood'], axis=1)\
.set_index('Regulator')
return weights_ordered
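# Worked example (illustration only): with F-statistic f_ = 8.0 and Spearman r_ = -0.45,
# the assigned MoA weight is (8.0 + abs(-0.45)) * sign(-0.45) = -8.45, so the magnitude
# combines both statistics while the sign of the correlation is kept.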
def scale(self, scaler_type='robust', thresh_filter=0.1):
""" Fit and scale expression data based on a specified data scaler algorithm
Args:
scaler_type (str): Scaler to normalized features/samples by:
standard | robust | minmax | quant
thresh_filter (float): Prior to normalization, remove features whose mean unit of
expression (i.e. 1 tpm) is not greater than {thresh_filter}
"""
self.scaler_type = scaler_type
if scaler_type == None:
warnings.warn('Proceeding without scaling dataset!')
self.expr = self.expr.T
else:
self.expr = self._preprocess_data(self.expr, self.scaler_type, thresh_filter)
self.scaled = True
def assign_weights(self):
"""
Generate normalized likelihood weights and assigns those weights to the absolute gene
expression signature
"""
if not self.scaled:
warnings.warn('Assigning interaction weights without scaling dataset!')
pruned_regulon, sub_expr = self._prune_regulon(self.expr, self.regulon, self.regulon_size)
self.expr = sub_expr
# noinspection PyTypeChecker
r, p = regulon_utils.spearmanr(self.expr)
r_frame = pd.DataFrame(r, columns=self.expr.columns, index=self.expr.columns)
p_frame = pd.DataFrame(p, columns=self.expr.columns, index=self.expr.columns)
F_statistics = {regulator: regulon_utils.f_regression(
self.expr.reindex(frame.DownGene, axis=1),
self.expr.reindex([regulator], axis=1).values.ravel())
for regulator, frame in pruned_regulon.groupby('UpGene')}
weights = pd.concat([self._structure_weights(regulator,
pruned_regulon,
F_statistics,
r_frame,
p_frame)
for regulator in F_statistics])
self.regulon_weights = weights[~np.isinf(weights.MoA)]
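# Minimal end-to-end sketch (added; `expr_df` is an assumed genes-by-samples DataFrame):
#   enr = Enrichment(cohort='demo', expr=expr_df)
#   enr.scale()                  # robust scaling by default
#   enr.assign_weights()         # Spearman + F-statistic regulon weights
#   enr.calculate_enrichment()   # fills enr.total_enrichment / local_enrichment / delta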
def calculate_enrichment(self):
"""
Subset and generate regulator activity scores based on rank ordering of up-regulated
and down-regulated targets
"""
if self.regulon_weights is None:
raise TypeError("`regulon_weights` must be assigned prior to enrichment calculation,"
" found {} instead!".format(type(self.regulon_weights)))
quant_nes = regulon_enrichment.quantile_nes_score(self.regulon_weights, self.expr.T)
self.quant_nes = quant_nes
self.regulators = self.regulon_weights.index.unique()
print('--- Calculating regulon enrichment scores ---')
nes_list, local_enrich_list, delta_list = zip(*list(map(functools.partial(regulon_enrichment.score_enrichment,
expr=self.expr,
regulon=self.regulon_weights,
quant_nes=quant_nes),
tqdm(self.regulators))))
self.total_enrichment = pd.concat(nes_list, axis=1)
self.local_enrichment = pd.concat(local_enrich_list, axis=1)
self.delta = | pd.concat(delta_list, axis=1) | pandas.concat |
import os
import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, train_test_split, StratifiedKFold
import PIL
from PIL import Image
import io
import cv2
from keras.datasets import mnist
import multiprocessing as mp
from multiprocessing import Pool, Manager, Process
from functools import partial
from . import logging_daily
from . import utils
from keras.utils import to_categorical
######################################################################
# Base Reader
######################################################################
class BaseReader(object):
"""Inherit from this class when implementing new readers."""
def __init__(self, log, path_info, network_info, verbose=True):
self.log = log
self.verbose = verbose
self.data_path = path_info['data_info']['data_path']
if network_info['model_info']['normalize_sym'] == 'True': self.normalize_sym = True
else: self.normalize_sym = False
if network_info['model_info']['n_label'] == 'None': self.n_label = None
else: self.n_label = int(network_info['model_info']['n_label'])
if network_info['model_info']['augment'] == 'True': self.augment = True
else: self.augment = False
self.x_list = None
self.img_shape = None
def read_dataset(self, data_path):
raise NotImplementedError()
def get_dataset(self):
raise NotImplementedError()
def get_cv_index(self, nfold=5):
raise NotImplementedError()
def get_augment(self, x):
for i in range(x.shape[0]):
if np.random.randint(2, size=1):
# Flip Horizontally
if np.random.randint(2, size=1):
x[i] = x[i,:,::-1,:] # (N, H, W, C)
# Channel Noise
if np.random.randint(2, size=1):
if np.random.randint(2, size=1):
# uniform noise
noise = np.random.uniform(0,0.05,(x.shape[1],x.shape[2]))
picked_ch = np.random.randint(3, size=1)[0]
x[i,:,:,picked_ch] += noise
x[i,:,:,picked_ch] = np.clip(x[i,:,:,picked_ch], a_min=0., a_max=1.)
elif np.random.randint(2, size=1):
# gray
x[i,:,:,:] = np.repeat(np.expand_dims(np.dot(x[i,:,:], [0.299, 0.587, 0.114]), axis=-1), 3, axis=-1)
return x
def show_class_information(self, y=None):
if np.any(y == None): y_table = self.y_table
else: y_table = pd.Series(y)
y_counts = y_table.value_counts()
self.log.info('-------------------------------------------------')
self.log.info('Images per Class')
self.log.info('\n%s', y_counts)
self.log.info('-------------------------------------------------')
self.log.info('Summary')
self.log.info('\n%s', y_counts.describe())
self.log.info('-------------------------------------------------')
# def write_embeddings_metadata(self, embedding_metadata_path, e_x, e_y):
# with open(embedding_metadata_path,'w') as f:
# f.write("Index\tLabel\tClass\n")
# for index,label in enumerate(e_y):
# f.write("%d\t%s\t%d\n" % (index,"fake",label)) # fake
# for index,label in enumerate(e_y):
# f.write("%d\t%s\t%d\n" % (len(e_y)+index,"true",10)) # true
def get_image_shape(self):
return self.img_shape
def get_cv_index(self, nfold=5, random_state = 12):
self.log.info('%d-fold Cross Validation Cut' % nfold)
kf = KFold(n_splits=nfold, shuffle=True, random_state=random_state)
return kf.split(range(self.y.shape[0]))
def get_training_validation_index(self, idx, validation_size=0.2):
return train_test_split(idx, test_size = validation_size)
def get_dataset(self):
return self.x, self.y
def get_label(self):
return self.y
def get_n_label(self):
return self.num_classes
def handle_imbalance(self, train_idx, minarity_group_size = 0.3, minarity_ratio = 0.3, seed=12):
self.log.info('----------------------------------------------------')
self.log.info('Handle imbalance')
self.log.info('Minarity_group_size : %s' % minarity_group_size)
self.log.info('Minarity_ratio (per group) : %s' % minarity_ratio)
self.log.info('----------------------------------------------------')
np.random.seed(seed)
minarities = np.random.choice(self.y_class, size= int(minarity_group_size * self.y_class.shape[0]))
pick = []
if len(minarities) > 0:
for i, minarity in enumerate(minarities):
minarity_index = self.y_index.get_loc(minarity)
delete_size = int(np.sum(minarity_index) * (1-minarity_ratio))
pick.append(np.random.choice(np.where(minarity_index)[0], replace=False, size=delete_size))
self.log.info('minarity class - %s : deleted %s of %s' %(minarity, delete_size, np.sum(minarity_index)))
pick = np.concatenate(pick)
train_idx = np.setdiff1d(train_idx, pick)
if self.verbose == True: self.show_class_information(self.y[train_idx])
return train_idx
def class_to_categorical(self, y):
return to_categorical(self.class_to_int(y), self.num_classes)
def categorical_to_series(self, y_coded):
return pd.Series(np.argmax(y_coded, axis=1)).map(self.y_int_to_class)
def class_to_int(self, y):
return np.array(pd.Series(y).map(self.y_class_to_int))
def int_to_class(self, y_int):
return pd.Series(y_int).map(self.y_int_to_class)
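# Illustrative label round trip (assumed labels): with y_class = ['cat', 'dog'],
# class_to_int(['dog', 'cat']) -> [1, 0], int_to_class([1, 0]) -> ['dog', 'cat'],
# and class_to_categorical(['dog']) -> [[0., 1.]] via keras' to_categorical.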
#########################################################################################################
# Toy Sample Reader
#########################################################################################################
class ToyReader(BaseReader):
def __init__(self, log, path_info, network_info, verbose=True):
super(ToyReader,self).__init__(log, path_info, network_info, verbose)
self.read_dataset(nlabel=self.n_label)
if verbose: self.show_class_information()
def read_dataset(self, nlabel=None):
dir_path = self.data_path
self.x = np.load('%s/x.npy'%dir_path).astype(np.float32)
self.x = self.x.reshape(self.x.shape[0],int(np.sqrt(self.x.shape[1])),int(np.sqrt(self.x.shape[1])),1)
self.y = np.load('%s/y.npy'%dir_path)
self.img_shape = self.x.shape[1:]
if not nlabel==None:
y_table = pd.Series(self.y)
selected_class = y_table.unique()[:nlabel]
selected_class = y_table.isin(selected_class)
self.x_list = self.x[selected_class]
self.y = self.y[selected_class]
## to categorical ####################
self.y_table = pd.Series(self.y)
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
self.y_class_to_int = dict(zip(self.y_class, range(self.y_class.shape[0])))
self.y_int_to_class = dict(zip(range(self.y_class.shape[0]), self.y_class))
self.num_classes = len(self.y_class)
######################################
def get_batch(self, idxs):
img_batches = self.x[idxs]
y = self.class_to_int(self.y[idxs])
return img_batches, y
def class_to_categorical(self, y):
return to_categorical(y, self.num_classes)
def categorical_to_class(self, y_coded):
return np.argmax(y_coded, axis=1)
def except_class(self, train_idx, except_class):
self.log.info('----------------------------------------------------')
for unknown_class in except_class:
self.log.info('Except class %d' % int(unknown_class))
unknown_class = int(unknown_class)
train_idx = train_idx[self.y[train_idx]!=unknown_class]
if self.verbose: self.show_class_information(self.y[train_idx])
self.log.info('----------------------------------------------------')
return train_idx
#########################################################################################################
# MNIST
#########################################################################################################
class MNISTReader(BaseReader):
def __init__(self, log, path_info, network_info, mode='train', verbose=True):
super(MNISTReader,self).__init__(log, path_info, network_info, verbose)
self.read_dataset(nlabel=self.n_label)
if verbose: self.show_class_information()
def read_dataset(self, nlabel=None):
(x_train, y_train), (x_test, y_test) = mnist.load_data()
self.x = np.concatenate((x_train, x_test), axis=0)
self.y = np.concatenate((y_train, y_test), axis=0)
if not nlabel==None:
y_table = pd.Series(self.y)
selected_class = y_table.unique()[:nlabel]
selected_class = y_table.isin(selected_class)
self.x_list = self.x[selected_class]
self.y = self.y[selected_class]
## to categorical ####################
self.y_table = pd.Series(self.y)
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
# self.y_class_to_int = dict(zip(self.y_class, range(self.y_class.shape[0])))
# self.y_int_to_class = dict(zip(range(self.y_class.shape[0]), self.y_class))
self.num_classes = len(self.y_class)
######################################
# normalize
if self.normalize_sym:
# force it to be of shape (...,28,28,1) with range [-1,1]
self.x = ((self.x - 127.5) / 127.5).astype(np.float32)
else:
self.x = (self.x / 255.).astype(np.float32)
self.x = np.expand_dims(self.x, axis=-1)
self.img_shape = self.x.shape[1:]
def get_batch(self, idxs):
img_batches = self.x[idxs]
if self.augment:
img_batches = self.get_augment(img_batches)
# y = self.class_to_int(self.y[idxs])
y = self.y[idxs]
return img_batches, y
def class_to_categorical(self, y):
return to_categorical(y, self.num_classes)
def categorical_to_class(self, y_coded):
return np.argmax(y_coded, axis=1)
def except_class(self, train_idx, except_class):
self.log.info('----------------------------------------------------')
for unknown_class in except_class:
self.log.info('Except class %d' % int(unknown_class))
unknown_class = int(unknown_class)
train_idx = train_idx[self.y[train_idx]!=unknown_class]
if self.verbose: self.show_class_information(self.y[train_idx])
self.log.info('----------------------------------------------------')
return train_idx
#########################################################################################################
# Omniglot
#########################################################################################################
class OmniglotReader(BaseReader):
def __init__(self, log, path_info, network_info, mode='train', verbose=True):
super(OmniglotReader,self).__init__(log, path_info, network_info, verbose)
self.mode = mode
self.read_dataset(nlabel=self.n_label)
if verbose: self.show_class_information()
def read_dataset(self, nlabel=None):
self.log.info('-------------------------------------------------')
self.log.info('Construct Dataset')
self.log.info('-------------------------------------------------')
self.log.info('Loading Omniglot dataset information')
self.img_shape = (105,105,1)
if self.mode=='train': data_type = 'images_background'
elif self.mode=='train_small1': data_type = 'images_background_small1'
elif self.mode=='train_small2': data_type = 'images_background_small2'
else: data_type = 'images_evaluation'
self.x_list = np.load('%s/%s_x_list.npy' % (self.data_path, data_type))
self.y = np.load('%s/%s_y.npy' % (self.data_path, data_type))
if not nlabel==None:
y_table = pd.Series(self.y)
selected_class = y_table.unique()[:nlabel]
selected_class = y_table.isin(selected_class)
self.x_list = self.x_list[selected_class]
self.y = self.y[selected_class]
# else:
# y_table = pd.Series(self.y)
# y_counts = y_table.value_counts()
# selected_class = y_counts[y_counts >= 5].keys()
# selected_class = y_table.isin(selected_class)
# self.x_list = self.x_list[selected_class]
# self.y = self.y[selected_class]
# self.not_used_class = y_counts[y_counts < 5].keys()
## to categorical ####################
self.y_table = pd.Series(self.y)
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
self.y_class_to_int = dict(zip(self.y_class, range(self.y_class.shape[0])))
self.y_int_to_class = dict(zip(range(self.y_class.shape[0]), self.y_class))
self.num_classes = len(self.y_class)
######################################
self.y_alphabet = np.array([xpath.split('/')[-3] for xpath in self.x_list])
# TODO except class list...
# def except_class(self, train_idx, unknown_class='9'):
# train_idx = np.array(train_idx)
# return train_idx[self.y[train_idx]!=unknown_class]
def get_cv_index(self, nfold=5, random_state = 12):
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html
self.log.info('Stratified %d-fold Cross Validation Cut' % nfold)
kf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=random_state)
return kf.split(range(self.y.shape[0]), self.y)
def get_dataset(self):
return self.x_list, self.y
def get_y_alphabet_class(self):
return self.y_alphabet
def get_label_name(self):
return np.array(self.y_class)
def get_batch(self, idxs):
try:
batch_imgs = []
batch_idxs = []
for i in idxs:
try:
batch_imgs.append(self._read_omniglot_image(self.x_list[i]))
batch_idxs.append(i)
except Exception as e:
raise ValueError(e)
batch_imgs = np.array(batch_imgs)
batch_idxs = np.array(batch_idxs)
# if self.augment and np.random.choice([0,1], 1, replace=False, p=[0.8,0.2]):
if self.augment:
batch_imgs = self.get_augment(batch_imgs)
if self.normalize_sym:
batch_imgs = (batch_imgs - 0.5) * 2.
y = self.class_to_int(self.y[np.array(batch_idxs)])
return batch_imgs, y
except Exception as e:
raise ValueError(e)
def _read_omniglot_image(self, filename):
try:
im = Image.open(filename)
# target_shape = np.array([self.img_shape[1],self.img_shape[0]])
# im = im.resize(target_shape, PIL.Image.ANTIALIAS)
im = np.expand_dims((1.-np.array(im).astype(np.float32)), -1)
# dilation (thickness)
# kernel = np.ones((3,3),np.uint8)
# im = np.expand_dims(cv2.dilate(im,kernel,iterations = 1), -1)
return im
except Exception as e:
raise ValueError('Error with %s : %s' % (filename, e))
# sys.exit()
#########################################################################
# funtions for augmentation
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html
#########################################################################
def _dilation(self, im, kernal_size=(2,2)):
# Dilation (thickness)
kernel = np.ones(kernal_size,np.uint8)
im = cv2.dilate(im,kernel,iterations = 1)
im[im>=0.5] = 1.
im[im<0.5] = 0.
return np.expand_dims(np.array(im).astype(np.float32), -1)
def _rotation(self, im, max_angle = 10):
# Rotation
rows,cols,ch = im.shape
angle = np.random.choice(np.append(np.arange(-max_angle,max_angle,max_angle//4),max_angle), 1)[0]
M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)
im = cv2.warpAffine(im,M,(cols,rows))
im[im>=0.5] = 1.
im[im<0.5] = 0.
return np.expand_dims(np.array(im).astype(np.float32), -1)
def _affine(self, im, max_tiltrate = 6):
# Affine transformation
rows,cols,ch = im.shape
tiltsize=np.random.choice(np.arange(max_tiltrate//4,max_tiltrate,max_tiltrate//4), 1)[0]
pts1 = np.float32([[tiltsize,tiltsize],[rows-tiltsize,tiltsize],[tiltsize,cols-tiltsize]])
pts2 = np.float32([[tiltsize,tiltsize],[rows,0],[0,cols]])
M = cv2.getAffineTransform(pts1,pts2)
im = cv2.warpAffine(im,M,(cols,rows))
im[im>=0.5] = 1.
im[im<0.5] = 0.
return np.expand_dims(np.array(im).astype(np.float32), -1)
def _perspective(self, im, max_padsize=6):
# Perspective tranformation
rows,cols,ch = im.shape
padsize=np.random.choice(np.arange(max_padsize//4,max_padsize,max_padsize//4), 1)[0]
pts1 = np.float32([[padsize,padsize],[rows-padsize,padsize],[padsize,cols-padsize],[rows-padsize,cols-padsize]])
pts2 = np.float32([[0,0],[rows,0],[0,cols],[rows,cols]])
M = cv2.getPerspectiveTransform(pts1,pts2)
im = cv2.warpPerspective(im,M,(rows,cols))
im[im>=0.5] = 1.
im[im<0.5] = 0.
return np.expand_dims(np.array(im).astype(np.float32), -1)
def get_augment(self, x):
for i in range(x.shape[0]):
if np.random.randint(2, size=1):
# if np.random.randint(2, size=1): x[i] = self._dilation(x[i]) # Dilation (thickness)
if np.random.randint(2, size=1): x[i] = self._rotation(x[i]) # Rotation
if np.random.randint(2, size=1): x[i] = self._affine(x[i]) # Affine transformation
if np.random.randint(2, size=1): x[i] = self._perspective(x[i]) # Perspective tranformation
return x
#########################################################################################################
# CelebA
#########################################################################################################
class CelebAReader(BaseReader):
def __init__(self, log, path_info, network_info, verbose=True):
super(CelebAReader,self).__init__(log, path_info, network_info, verbose)
self.crop_style=network_info['model_info']['crop_style'].strip()
self.attr_label=network_info['model_info']['attr_label'].strip()
self.read_dataset(self.attr_label)
if verbose: self.show_class_information()
def read_dataset(self, attr_label='Male'):
self.log.info('-------------------------------------------------')
self.log.info('Construct Dataset')
self.log.info('-------------------------------------------------')
self.log.info('Loading CelebA dataset information')
self.img_shape = (64, 64, 3)
# num_samples = len(os.listdir(self.data_path)) #202599
# self.datapoint_ids = np.arange(1, num_samples + 1)
# np.random.shuffle(self.datapoint_ids)
# self.x_list = ['%.6d.jpg' % i for i in self.datapoint_ids]
self.x_list = np.load('%s/x_list.npy' % '/'.join(self.data_path.split('/')[:-1]))
self.attr = pd.read_csv('/'.join(self.data_path.split('/')[:-1])+'/list_attr_celeba.csv')
sorterIndex = dict(zip(self.x_list,range(len(self.x_list))))
self.attr['index'] = self.attr['image_id'].map(sorterIndex)
self.attr = self.attr.sort_values('index')
self.y = np.array(self.attr[attr_label])
self.y[self.y == -1] = 0
self.class_name = np.array(['no_%s' % attr_label,attr_label])
self.y_table = pd.Series(self.y)
self.y_counts = self.y_table.value_counts()
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
self.num_classes = self.y_class.shape[0]
def get_dataset(self):
return self.x_list, self.y
def get_label_name(self):
return self.class_name
def get_batch(self, idxs):
img_batches = np.array([self._read_celeba_image(self.x_list[i]) for i in idxs])
if self.augment:
img_batches = self.get_augment(img_batches)
if self.normalize_sym:
img_batches = (img_batches - 0.5) * 2.
return img_batches, self.y[np.array(idxs)]
def _read_celeba_image(self, filename):
# from WAE
width = 178
height = 218
new_width = 140
new_height = 140
im = Image.open(utils.o_gfile((self.data_path, filename), 'rb'))
if self.crop_style == 'closecrop':
# This method was used in DCGAN, pytorch-gan-collection, AVB, ...
left = (width - new_width) / 2.
top = (height - new_height) / 2.
right = (width + new_width) / 2.
bottom = (height + new_height)/2.
im = im.crop((left, top, right, bottom))
im = im.resize((64, 64), PIL.Image.ANTIALIAS)
elif self.crop_style == 'resizecrop':
# This method was used in ALI, AGE, ...
im = im.resize((64, 64+14), PIL.Image.ANTIALIAS)
im = im.crop((0, 7, 64, 64 + 7))
else:
raise Exception('Unknown crop style specified')
return np.array(im).reshape(64, 64, 3) / 255.
#########################################################################################################
# VGG 2 Face
#########################################################################################################
class VGGFace2Reader(BaseReader):
def __init__(self, log, path_info, network_info, mode='train', verbose=True):
super(VGGFace2Reader,self).__init__(log, path_info, network_info, verbose)
self.crop_style=network_info['model_info']['crop_style'].strip()
self.img_shape = np.array([int(ishape.strip()) for ishape in network_info['model_info']['img_shape'].split(',')])
self.mode = mode
self.read_dataset(nlabel=self.n_label)
if verbose: self.show_class_information()
try: self.feature_b = 'true' == network_info['model_info']['feature_b'].strip().lower()
except: self.feature_b = False
if self.feature_b:
if self.mode == 'train':
self.all_features_for_b = np.load('%s/all_features_normalized.npy' % path_info['data_info']['data_path'])
else:
self.all_features_for_b = np.load('%s/all_features_of_unknown_normalized.npy' % path_info['data_info']['data_path'])
self.log.info('Load all features for b: %s' % np.array(len(self.all_features_for_b)))
try: self.fixed_b_path = network_info['training_info']['fixed_b_path'].strip()
except: self.fixed_b_path = None
if self.fixed_b_path is not None:
self.all_b = np.load(self.fixed_b_path)
self.log.info('Load all b: %s' % np.array(self.all_b.shape))
def read_dataset(self, nlabel=None):
self.log.info('-------------------------------------------------')
self.log.info('Construct Dataset')
self.log.info('-------------------------------------------------')
self.log.info('Loading VGG Face 2 dataset information')
self.log.info('Set image shape : %s' % self.img_shape)
# names = os.listdir(self.data_path+'/npy_128')
# if not npersion==None:
# names = names[:npersion]
# file_dict = {}
# file_dict.update([(name, os.listdir(self.data_path+'/images/%s' % name)) for name in names])
# self.x_list = np.concatenate([['%s/%s'%(name, path) for path in paths] for name, paths in file_dict.items()])
# self.y = np.concatenate([[name]*len(paths) for name, paths in file_dict.items()])
if self.mode == 'train': list_path = "%s/%s" % (self.data_path, 'train_list.txt')
else: list_path = "%s/%s" % (self.data_path, 'test_list.txt')
with open(list_path, 'r') as f:
self.x_list = f.read()
self.x_list = np.array(self.x_list.split('\n')[:-1])
x_table = pd.Series(self.x_list)
self.y = np.array(x_table.map(lambda x : x.split("/")[0]))
if not nlabel==None:
y_table = pd.Series(self.y)
# selected_class = y_table.unique()[np.random.choice(np.arange(y_table.unique().shape[0]), nlabel)]
selected_class = np.sort(y_table.unique())[:nlabel]
selected_class = y_table.isin(selected_class)
self.x_list = self.x_list[selected_class]
self.y = self.y[selected_class]
## to categorical ####################
self.y_table = pd.Series(self.y)
self.y_index = pd.Index(self.y)
self.y_class = np.sort(self.y_table.unique())
self.y_class_to_int = dict(zip(self.y_class, range(self.y_class.shape[0])))
self.y_int_to_class = dict(zip(range(self.y_class.shape[0]), self.y_class))
self.num_classes = len(self.y_class)
######################################
if self.mode == 'train': self.image_info = pd.read_csv('%s/%s' % (self.data_path, 'bb_landmark/loose_bb_train.csv'))
else: self.image_info = pd.read_csv('%s/%s' % (self.data_path, 'bb_landmark/loose_bb_test.csv'))
self.image_info = self.image_info.set_index(['NAME_ID'])
def except_class(self, except_class):
self.log.info('----------------------------------------------------')
train_idx = np.arange(self.y.shape[0])
for unknown_class in except_class:
self.log.info('Except class %s' % unknown_class)
train_idx = train_idx[self.y[train_idx]!=unknown_class]
self.x_list = self.x_list[train_idx]
self.y = self.y[train_idx]
if self.verbose: self.show_class_information(self.y)
self.log.info('----------------------------------------------------')
## to categorical ####################
self.y_table = pd.Series(self.y)
self.y_index = | pd.Index(self.y) | pandas.Index |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 17:28:38 2019
@author: Chandar_S
"""
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.ticker import PercentFormatter
import pylab as pl
from pylab import rc
import numpy as np
import pandas as pd
import string
from PIL import ImageTk
import PIL
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from sklearn import model_selection, metrics, linear_model, naive_bayes, svm, ensemble, decomposition
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.neighbors import KNeighborsClassifier
#from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import TruncatedSVD
import xgboost
from keras.preprocessing import text, sequence
from keras import layers, models, optimizers
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.cluster import KMeansClusterer
import re
class mclass:
def __init__(self, window, embeddings_index):
self.window = window
self.embeddings_index = embeddings_index
self.stop = set(stopwords.words('english'))
# custom words to ignore
custom_stop_words = ['tracke option', 'track options', 'service xcall', 'work note', 'service option', 'osel', 'phontusa']
for word in custom_stop_words:
self.stop.add(word)
self.output_file = "TicketAnalytics_Results.xlsx"
window.title("AI Ops Suite")
image = PIL.Image.open("Images/MainLogo.png")
image = image.resize((112, 56), PIL.Image.ANTIALIAS)
img = ImageTk.PhotoImage(image)
panel = Label(window, image = img, width=150, height=100, justify="left")
panel.image = img
panel.grid(row=0, column=0)
self.Header= Label( window, text="Ticket Analytics", justify="center" )
self.Header.config(fg="teal", font=("Helvetica", 30))
self.Header.grid(row=0, column=1)
image = PIL.Image.open("Images/AI.png")
image = image.resize((100, 100), PIL.Image.ANTIALIAS)
img = ImageTk.PhotoImage(image)
panelright = Label(window, image = img, width=150, height=100, justify="right")
panelright.image = img
panelright.grid(row=0, column=2)
self.Filelabel = Label( window, text="EXCEL File Name" )
self.Filelabel.grid(row=1, column=0)
def browsefunc():
filename = filedialog.askopenfilename()
self.fileName.delete(0, END)
self.fileName.insert(END, filename)
# self.fileName.config(text=filename)
browsebutton = Button(window, text="Browse", command=browsefunc, justify="left")
browsebutton.grid(row=1, column=2)
self.fileName = Entry(window, relief=RIDGE, width=50)
self.fileName.insert(END, 'T&ADataForAnalysis_NonCluster.xlsx')
self.fileName.grid (row=1, column=1)
self.Sheetlabel = Label( window, text="Sheet Name" )
self.Sheetlabel.grid (row=2, column=0)
self.sheetName = Entry(window, relief=RIDGE, width=50)
self.sheetName.insert(END, 'Sheet1')
self.sheetName.grid (row=2, column=1)
self.button = Button (window, text="Read Data", command=self.ReadExcelData)
self.button.grid(row=3, column=1)
#%%
def ReadExcelData(self):
try:
self.excel_data=pd.read_excel(self.fileName.get(), sheet_name=self.sheetName.get())
column_data = self.excel_data.iloc[0:0,0:150] #Selecting the column that has text.
ticketlabel = Label( window, text="STEP 1: \n Select the column which has unique identifier \n (ex: ticket number)" )
sourcelabel = Label( window, text="STEP 2: \n Select the ticket data \n to analyze\n(Multiple fields can be selected)" )
targetlabel = Label( window, text="STEP 3: \n Select the classification column \n(semi-human classified data) \n to be predicted" )
self.ticket_column_list = Listbox(self.window, selectmode=SINGLE, width=50, height=10)
self.ticket_column_list.configure(exportselection=False)
self.source_column_list = Listbox(self.window, selectmode=EXTENDED, width=50, height=10)
self.source_column_list.configure(exportselection=False)
self.target_column_list = Listbox(self.window,selectmode=SINGLE, width=50, height=10)
self.target_column_list.configure(exportselection=False)
self.target_column_list .insert(END, "I DON'T HAVE THIS DATA")
self.source_column_dic = {}
ind = 0
for item in column_data:
self.ticket_column_list.insert(END, item)
self.source_column_list.insert(END, item)
self.source_column_dic[ind] = item
self.target_column_list .insert(END, item)
ind = ind + 1
Scrollbar(self.ticket_column_list, orient="vertical")
Scrollbar(self.source_column_list, orient="vertical")
Scrollbar(self.target_column_list, orient="vertical")
ticketlabel.grid(row=4, column=0)
self.ticket_column_list.grid(row=5, column=0)
sourcelabel.grid(row=4, column=1)
self.source_column_list.grid(row=5, column=1)
targetlabel.grid(row=4, column=2)
self.target_column_list .grid(row=5, column=2)
button = Button (self.window, text="Analyze Tickets", command=self.AnalyzeTickets)
button.grid(row=6, column=1)
# fb = ttk.Frame()
pb = ttk.Progressbar(self.window, orient ="horizontal",length = 200, mode ="indeterminate")
pb.grid(row=7, column=0, columnspan = 3)
# pb.pack(expand=True, fill=Tkinter.BOTH, side=Tkinter.TOP)
pb.start(50)
except Exception as e:
messagebox.showerror("Read Error", str(e))
#%%
def AnalyzeTickets(self):
# try:
items = self.source_column_list.curselection()
Analysis_primary_columnNames = [self.source_column_dic[int(item)] for item in items]
Analysis_Result_columnName = None if self.target_column_list.curselection()[0] == 0 else self.target_column_list.get(ACTIVE)
Analysis_ticket_columnName = self.ticket_column_list.get(ACTIVE)
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
# Cleaning the text sentences so that punctuation marks, stop words & digits are removed
def clean(doc):
stop_free = " ".join([i for i in doc.lower().split() if i not in self.stop])
punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
processed = re.sub(r"\d+","",normalized)
y = processed.split()
return y
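# Illustrative behaviour of clean() (example input is assumed):
#   clean("Password reset failed 3 times!!") -> ['password', 'reset', 'failed', 'time']
# i.e. lower-casing, stopword/punctuation removal, lemmatization, then digit stripping.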
#Converting the column of data from excel sheet into a list of documents, where each document corresponds to a group of words.
All_ticket_numbers=[]
training_corpus=[]
training_corpus_listformat=[]
training_description=[]
testing_corpus=[]
testing_description=[]
training_ticket_numbers=[]
testing_ticket_numbers=[]
training_output_category=[]
ticket_data = self.excel_data.iloc[:,0:150] #Selecting the column that has text.
# Trying to add a new column which will hold all the selected columns
New_Analysis_columnName = "MasterIssue"
ticket_data[New_Analysis_columnName] = ticket_data[Analysis_primary_columnNames[0]].copy()
# ticket_data.drop(columns=[Analysis_primary_columnNames[0]])
Analysis_primary_columnNames.remove(Analysis_primary_columnNames[0])
for item in Analysis_primary_columnNames:
ticket_data[New_Analysis_columnName] = ticket_data[New_Analysis_columnName] + " " + ticket_data[item]
# ticket_data.drop(columns=Analysis_primary_columnNames)
for index,row in ticket_data.iterrows():
line = ""
if (row[New_Analysis_columnName] and str(row[New_Analysis_columnName]) != 'nan' ):
line = str(row[New_Analysis_columnName])
line = line.strip()
cleaned_split = clean(line)
cleaned = ' '.join(cleaned_split)
''' IF MANUAL CLASSIFICATION IS AVAILABLE, PUT THE ROW INTO TRAINING, ELSE TESTING '''
if (Analysis_Result_columnName is None or str(row[Analysis_Result_columnName]) != 'nan'):
training_description.append(line)
training_corpus.append(cleaned)
training_corpus_listformat.append(cleaned_split)
# Add ticket number for indexing
training_ticket_numbers.append(row[Analysis_ticket_columnName])
if Analysis_Result_columnName is not None:
training_output_category.append(row[Analysis_Result_columnName])
else:
testing_description.append(line)
testing_corpus.append(cleaned)
testing_ticket_numbers.append(row[Analysis_ticket_columnName])
All_ticket_numbers.append(row[Analysis_ticket_columnName])
# IF NO EXISTING MANUAL TAG AVAILABLE, PERFORM UNSUPERVISED LEARNING
if Analysis_Result_columnName is None:
# Perform unsupervised clustering and get cluster results
cluster_labels, clusters = self.PerformClustering(training_corpus_listformat)
# Analyze the clustering and come up with tagging to plot and generate excel
plot_frame, cluster_themes_dict = self.AnalyzeClustering(clusters, cluster_labels, training_corpus)
def tagCluster(cluster_no):
# return the first tagging
return cluster_themes_dict[cluster_no]
classification_dic={'Issue': training_description, 'Transformed Data':training_corpus, 'Machine Cluster':clusters, 'Machine Tag': list(map(tagCluster, clusters))}
excel_frame=pd.DataFrame(classification_dic, index=[training_ticket_numbers], columns=['Issue', 'Transformed Data', 'Machine Cluster', 'Machine Tag'])
# Show your results in pop-up, a pareto chart and a summary of clusters
self.PlotResults(plot_frame, excel_frame)
else:
classification_dic={'Issue': training_description, 'Transformed Data':training_corpus, 'Human Tag': training_output_category, 'Machine Tag': training_output_category} # In supervised mode the human tags seed the machine tags.
excel_frame=pd.DataFrame(classification_dic, index=[training_ticket_numbers], columns=['Issue', 'Transformed Data', 'Machine Cluster', 'Human Tag', 'Machine Tag']) # 'Machine Cluster' has no source data here and is left as NaN.
# do prediction only if testing data is available
if len(testing_corpus) > 0:
predicted_labels = self.RunTrainingModels(training_corpus, training_output_category, testing_corpus)
classification_dic={'Issue': testing_description, 'Transformed Data' : testing_corpus, 'Machine Tag':predicted_labels} # Dict of test tickets with their predicted tags.
predicted_frame=pd.DataFrame(classification_dic, index=[testing_ticket_numbers], columns=['Issue', 'Transformed Data', 'Machine Tag']) # Converting it into a dataframe.
excel_frame = pd.concat([excel_frame, predicted_frame], sort=False)
import flask
from flask import Flask, send_from_directory, render_template, request, redirect, url_for
import pandas as pd
from random import randint, choice
from datetime import datetime
app = Flask(__name__)
# prevent caching
app.config["CACHE_TYPE"] = "null"
# returns a json response with the list of patients
@app.route('/get_patients')
def get_patients():
df = pd.read_csv("patients.csv")
return df.to_json(orient="split")
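# Illustrative usage (assuming the default Flask dev server on localhost:5000):
# curl http://localhost:5000/get_patients
# -> {"columns": [...], "index": [...], "data": [...]} (pandas "split" orientation)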
# generates random fields for patient 0
@app.route('/gen_data')
def gen_data():
records = pd.read_csv("records.csv")
# a random choice is selected from here for the comments field
choices = ["", "", "", "", "", "had bad back", "has weird mole"]
row = {
"Date" : datetime.now(),
"Patient_ID" : 0,
"Calories_Burned" : randint(1600, 3000) ,
"Steps_Taken" : randint(1000, 8000),
"Minutes_Slept" : randint(300, 600),
"BPM": randint(60, 100),
"Systolic": randint(120, 180),
"Diastolic": randint(80, 120),
"Floors_Climbed": randint(2, 14),
"Height": 170,
"Weight": 70,
"Comments": choice(choices),
}
records = pd.concat([records, pd.DataFrame([row])], ignore_index=True) # DataFrame.append is deprecated/removed in newer pandas
records.to_csv("records.csv", index=False)
return "Success"
# displays an overview of information pertaining to the patient passed as argument
@app.route('/patient/<path:path>')
def get_patient(path):
patients = pd.read_csv("patients.csv")
records = pd.read_csv("records.csv")
# get the latest record for given patient in path
mostRecentRecord = records[records['Patient_ID'] == int(path)].sort_values(by='Date').iloc[-1]
latestRecord = {
'Calories_Burned': mostRecentRecord['Calories_Burned'],
'Steps_Taken': mostRecentRecord['Steps_Taken'],
'Hours_Slept': float(mostRecentRecord['Minutes_Slept'])/60,
'BPM': mostRecentRecord['BPM'],
'Systolic': mostRecentRecord['Systolic'],
'Diastolic': mostRecentRecord['Diastolic'],
'Floors_Climbed': mostRecentRecord['Floors_Climbed'],
'Height': mostRecentRecord['Height'],
'Weight': mostRecentRecord['Weight'],
}
# get the patients info
patientInfo = patients[patients['UniqueID'] == int(path)].iloc[0]
patient = {
'name' : patientInfo['Name'],
'gender' : patientInfo['Gender'],
'birthdate' : patientInfo['Birthdate'],
'address' : patientInfo['Address'],
'phone' : patientInfo['Phone'],
}
return render_template('profile.html', title="Patient", latestRecord=latestRecord, patient=patient, path=path)
# displays a table of the patient data (sensor readings)
@app.route('/patientlog/<path:path>')
def get_patient_log(path):
patients = pd.read_csv("patients.csv")
records = pd.read_csv("records.csv")
# get the latest record for given patient in path
patientRecords = records[records['Patient_ID'] == int(path)]
dateList = pd.to_datetime(patientRecords['Date']).tolist()
dateList = [datetime.strftime(i, "%Y-%m-%d %H:%M") for i in dateList]
records = {
'Date': dateList,
'Calories_Burned': patientRecords['Calories_Burned'].tolist(),
'Steps_Taken': patientRecords['Steps_Taken'].tolist(),
'Hours_Slept': (patientRecords['Minutes_Slept'] / 60).tolist(), # convert minutes to hours to match the key (as in the profile route)
'BPM': patientRecords['BPM'].tolist(),
'Systolic': patientRecords['Systolic'].tolist(),
'Diastolic': patientRecords['Diastolic'].tolist(),
'Floors_Climbed': patientRecords['Floors_Climbed'].tolist(),
'Height': patientRecords['Height'].tolist(),
'Weight': patientRecords['Weight'].tolist(),
'Comments': patientRecords['Comments'].tolist(),
}
patientInfo = patients[patients['UniqueID'] == int(path)].iloc[0]
patient = {
'name' : patientInfo['Name']
}
return render_template('patients-log.html', title="Patient Log", records=records, patient=patient, len = len(records['Date']), path=path)
# displays a list of patients
@app.route('/patients')
def get_patient_list():
return render_template('data.html', title="Patients")
# route for the static files
@app.route('/stat/<path:path>')
def get_static(path):
return send_from_directory('../', path)
# displays page that allows user to add a record for patient passed as arg
@app.route('/add_record/<path:path>')
def add_record(path):
return render_template('manualAddingInformation.html', title="Add Record", path=path)
# api to add record to database
# takes a json dict, which corresponds to db values as follows
# {
# 'date' : 'Date',
# 'weight' : 'Weight',
# 'systolic' : 'Systolic',
# 'diastolic' : 'Diastolic',
# 'calories' : 'Calories_Burned',
# 'stepsTaken' : 'Steps_Taken',
# 'hoursSlept' : 'Minutes_Slept',
# 'comments' : 'Comments',
# 'averagePulse' : 'BPM',
# }
#
# binds the db value to the value required by the front end
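# Illustrative request body (field names must match the keys above; the values shown are made up):
# {"formAnswers": [{"name": "date", "value": "2020-01-01 09:00"},
#                  {"name": "weight", "value": 71},
#                  {"name": "comments", "value": "feeling fine"}]}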
@app.route('/send_record/<path:path>', methods=['POST']) #GET requests will be blocked
def send_patient_data(path):
records = pd.read_csv("records.csv")
req_data = request.get_json()
keyDict = {
'date' : 'Date',
'weight' : 'Weight',
'systolic' : 'Systolic',
'diastolic' : 'Diastolic',
'calories' : 'Calories_Burned',
'stepsTaken' : 'Steps_Taken',
'hoursSlept' : 'Minutes_Slept',
'comments' : 'Comments',
'averagePulse' : 'BPM',
}
row = {'Patient_ID': path}
for i in range(len(req_data['formAnswers'])):
actualKey = keyDict[req_data['formAnswers'][i]['name']]
row[actualKey] = req_data['formAnswers'][i]['value']
records = pd.concat([records, pd.DataFrame([row])], ignore_index=True) # DataFrame.append is deprecated/removed in newer pandas
records.to_csv("records.csv", index=False)
return "Success"
# redirect / to patient page
@app.route('/')
def home():
return redirect("patients")
# gets stats for patient
@app.route('/patientstats/<path:path>')
def get_stats(path):
patients = pd.read_csv("patients.csv")
# sort by date so the graphs display correctly
records = pd.read_csv("records.csv")
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
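# Pin the partition count so the tests exercise multi-partition frames deterministically
# (presumably the intent; the exact value is not otherwise significant here).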
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
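# Helper: apply the binary operator `op` on the modin frame against several right-hand sides
# (itself, scalars, a differently shaped frame, a list, a Series) and check that the result,
# or the raised exception type, matches plain pandas.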
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
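# Helper: mirror of inter_df_math_helper for the comparison operators (eq, ge, gt, le, lt, ne),
# checking modin against pandas for frame, scalar, string and mismatched-frame operands.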
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
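# Helper: check the reflected ("r"-prefixed) operators against scalar operands only,
# plus the level= fallback-to-pandas warning on a MultiIndex frame.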
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but there so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
        # pandas behaves inconsistently for datetime/timedelta data along axis 0.
        # Remove this special case once error messages can be pulled from the backend.
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
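        # Converting every value to str exercises describe() on an all-object frame.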
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
            # Modin may pick a different `top` value than pandas when several values
            # tie for the highest count, so the full describe() results are not
            # guaranteed to match. In that case, at least check that `count`,
            # `unique`, and `freq` agree.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
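        # Dropping by level on a MultiIndex is expected to emit a UserWarning.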
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
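        # how=None without a thresh is invalid and should raise a TypeError.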
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is intentionally unused; constructing it here keeps the test body
        # uniform with the other tests instead of filtering the data inside
        # pytest.mark.parametrize.
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
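        # DataFrame-by-DataFrame dot is expected to emit a UserWarning.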
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
        # Test non-inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
        # We are not testing when axis is over columns until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
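        # Before filling the Modin copy in place, the two frames should differ;
        # the try/except inverts the usual equality check to assert the mismatch.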
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
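        # fillna(..., inplace=True) should return None.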
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
            # Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
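        # Force duplicate index labels (every three consecutive rows share a label).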
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
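        # Inserting an entire DataFrame as a single column should raise a ValueError.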
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
            # Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
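        # Build a 16-row frame with a two-level MultiIndex and check partial-label lookups.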
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
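        # If pandas raises for this parameter combination, Modin should raise a TypeError too.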
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
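        # Merging an unnamed Series should raise a ValueError.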
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
                    assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
pandas_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
        # gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
        # without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
        test_data = TestData()
def telco_churn(quantile=.5):
    '''Returns a dataset in the format x, [y1, y2]. This dataset
    is useful for demonstrating a multi-output model or for
    experimenting with reduction strategy creation.
    The data is from a hyperparameter optimization experiment with
    the Kaggle telco churn dataset.
    x: features
    y1: val_loss
    y2: val_f1score
    quantile transforms the otherwise continuous y variables into
    labels so that a higher value is stronger. If set to 0, the original
    continuous values are returned.'''
import wrangle
import pandas as pd
    df = pd.read_csv('https://raw.githubusercontent.com/autonomio/examples/master/telco_churn/telco_churn_for_sensitivity.csv')
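# A minimal usage sketch for telco_churn(), assuming the remainder of the
# function (not shown above) splits the loaded frame into features and the two
# targets described in the docstring. The unpacking follows the documented
# return format x, [y1, y2]; the variable names are illustrative only.
def _telco_churn_example():
    x, (y_loss, y_f1) = telco_churn(quantile=.5)  # targets binarised at the median
    x2, (loss, f1) = telco_churn(quantile=0)      # original continuous targets
    return x.shape, len(y_loss), len(y_f1)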
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from distutils.version import LooseVersion
import unittest
import numpy as np
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.exceptions import SparkPandasIndexingError
from databricks.koalas.testing.utils import ComparisonTestBase, ReusedSQLTestCase, compare_both
class BasicIndexingTest(ComparisonTestBase):
@property
def pdf(self):
return pd.DataFrame(
{"month": [1, 4, 7, 10], "year": [2012, 2014, 2013, 2014], "sale": [55, 40, 84, 31]}
)
@compare_both(almost=False)
def test_indexing(self, df):
df1 = df.set_index("month")
yield df1
yield df.set_index("month", drop=False)
yield df.set_index("month", append=True)
yield df.set_index(["year", "month"])
yield df.set_index(["year", "month"], drop=False)
yield df.set_index(["year", "month"], append=True)
yield df1.set_index("year", drop=False, append=True)
df2 = df1.copy()
df2.set_index("year", append=True, inplace=True)
yield df2
self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index("unknown"))
self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index(["month", "unknown"]))
for d in [df, df1, df2]:
yield d.reset_index()
yield d.reset_index(drop=True)
yield df1.reset_index(level=0)
yield df2.reset_index(level=1)
yield df2.reset_index(level=[1, 0])
yield df1.reset_index(level="month")
yield df2.reset_index(level="year")
yield df2.reset_index(level=["month", "year"])
yield df2.reset_index(level="month", drop=True)
yield df2.reset_index(level=["month", "year"], drop=True)
self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 1 level, not 3",
lambda: df1.reset_index(level=2),
)
self.assertRaisesRegex(
IndexError,
"Too many levels: Index has only 1 level, not 4",
lambda: df1.reset_index(level=[3, 2]),
)
self.assertRaisesRegex(KeyError, "unknown.*month", lambda: df1.reset_index(level="unknown"))
self.assertRaisesRegex(
KeyError, "Level unknown not found", lambda: df2.reset_index(level="unknown")
)
df3 = df2.copy()
df3.reset_index(inplace=True)
yield df3
yield df1.sale.reset_index()
yield df1.sale.reset_index(level=0)
yield df2.sale.reset_index(level=[1, 0])
yield df1.sale.reset_index(drop=True)
yield df1.sale.reset_index(name="s")
yield df1.sale.reset_index(name="s", drop=True)
s = df1.sale
self.assertRaisesRegex(
TypeError,
"Cannot reset_index inplace on a Series to create a DataFrame",
lambda: s.reset_index(inplace=True),
)
s.reset_index(drop=True, inplace=True)
yield s
yield df1
# multi-index columns
df4 = df.copy()
df4.columns = pd.MultiIndex.from_tuples(
[("cal", "month"), ("cal", "year"), ("num", "sale")]
)
df5 = df4.set_index(("cal", "month"))
yield df5
yield df4.set_index([("cal", "month"), ("num", "sale")])
self.assertRaises(KeyError, lambda: df5.reset_index(level=("cal", "month")))
yield df5.reset_index(level=[("cal", "month")])
# non-string names
df6 = df.copy()
df6.columns = [10.0, 20.0, 30.0]
df7 = df6.set_index(10.0)
yield df7
yield df6.set_index([10.0, 30.0])
yield df7.reset_index(level=10.0)
yield df7.reset_index(level=[10.0])
df8 = df.copy()
df8.columns = pd.MultiIndex.from_tuples([(10, "month"), (10, "year"), (20, "sale")])
df9 = df8.set_index((10, "month"))
yield df9
yield df8.set_index([(10, "month"), (20, "sale")])
yield df9.reset_index(level=[(10, "month")])
def test_from_pandas_with_explicit_index(self):
pdf = self.pdf
df1 = ks.from_pandas(pdf.set_index("month"))
self.assertPandasEqual(df1.to_pandas(), pdf.set_index("month"))
df2 = ks.from_pandas(pdf.set_index(["year", "month"]))
self.assertPandasEqual(df2.to_pandas(), pdf.set_index(["year", "month"]))
def test_limitations(self):
df = self.kdf.set_index("month")
self.assertRaisesRegex(
ValueError,
"Level should be all int or all string.",
lambda: df.reset_index([1, "month"]),
)
class IndexingTest(ReusedSQLTestCase):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
@property
def pdf2(self):
return pd.DataFrame(
{0: [1, 2, 3, 4, 5, 6, 7, 8, 9], 1: [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
def test_at(self):
pdf = self.pdf
kdf = self.kdf
# Create the equivalent of pdf.loc[3] as a Koalas Series
# This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
test_series = ks.Series([3, 6], index=["a", "b"], name="3")
        # Assert invalid signatures raise TypeError
with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
kdf.at[3]
with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
kdf.at["ab"] # 'ab' is of length 2 but str type instead of tuple
with self.assertRaises(TypeError, msg="Use Series.at like .at[column_name]"):
test_series.at[3, "b"]
# Assert .at for DataFrames
self.assertEqual(kdf.at[3, "b"], 6)
self.assertEqual(kdf.at[3, "b"], pdf.at[3, "b"])
self.assert_eq(kdf.at[9, "b"], np.array([0, 0, 0]))
self.assert_eq(kdf.at[9, "b"], pdf.at[9, "b"])
# Assert .at for Series
self.assertEqual(test_series.at["b"], 6)
self.assertEqual(test_series.at["b"], pdf.loc[3].at["b"])
# Assert multi-character indices
self.assertEqual(
ks.Series([0, 1], index=["ab", "cd"]).at["ab"],
pd.Series([0, 1], index=["ab", "cd"]).at["ab"],
)
# Assert invalid column or index names result in a KeyError like with pandas
with self.assertRaises(KeyError, msg="x"):
kdf.at[3, "x"]
with self.assertRaises(KeyError, msg=99):
kdf.at[99, "b"]
with self.assertRaises(ValueError):
kdf.at[(3, 6), "b"]
with self.assertRaises(KeyError):
kdf.at[3, ("x", "b")]
# Assert setting values fails
with self.assertRaises(TypeError):
kdf.at[3, "b"] = 10
# non-string column names
pdf = self.pdf2
kdf = self.kdf2
# Assert .at for DataFrames
self.assertEqual(kdf.at[3, 1], 6)
self.assertEqual(kdf.at[3, 1], pdf.at[3, 1])
self.assert_eq(kdf.at[9, 1], np.array([0, 0, 0]))
self.assert_eq(kdf.at[9, 1], pdf.at[9, 1])
def test_at_multiindex(self):
pdf = self.pdf.set_index("b", append=True)
kdf = self.kdf.set_index("b", append=True)
        # TODO: seems like a pandas bug in pandas>=1.1.0
if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
self.assert_eq(kdf.at[(3, 6), "a"], pdf.at[(3, 6), "a"])
self.assert_eq(kdf.at[(3,), "a"], pdf.at[(3,), "a"])
self.assert_eq(list(kdf.at[(9, 0), "a"]), list(pdf.at[(9, 0), "a"]))
self.assert_eq(list(kdf.at[(9,), "a"]), list(pdf.at[(9,), "a"]))
else:
self.assert_eq(kdf.at[(3, 6), "a"], 3)
self.assert_eq(kdf.at[(3,), "a"], np.array([3]))
self.assert_eq(list(kdf.at[(9, 0), "a"]), [7, 8, 9])
self.assert_eq(list(kdf.at[(9,), "a"]), [7, 8, 9])
with self.assertRaises(ValueError):
kdf.at[3, "a"]
def test_at_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.at["B", ("bar", "one")], pdf.at["B", ("bar", "one")])
with self.assertRaises(KeyError):
kdf.at["B", "bar"]
# non-string column names
arrays = [np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.at["B", (0, 1)], pdf.at["B", (0, 1)])
def test_iat(self):
pdf = self.pdf
kdf = self.kdf
# Create the equivalent of pdf.loc[3] as a Koalas Series
# This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
test_series = ks.Series([3, 6], index=["a", "b"], name="3")
        # Assert invalid signatures raise TypeError
with self.assertRaises(
TypeError,
msg="Use DataFrame.at like .iat[row_interget_position, column_integer_position]",
):
kdf.iat[3]
with self.assertRaises(
ValueError, msg="iAt based indexing on multi-index can only have tuple values"
):
kdf.iat[3, "b"] # 'ab' is of length 2 but str type instead of tuple
with self.assertRaises(TypeError, msg="Use Series.iat like .iat[row_integer_position]"):
test_series.iat[3, "b"]
# Assert .iat for DataFrames
self.assertEqual(kdf.iat[7, 0], 8)
self.assertEqual(kdf.iat[7, 0], pdf.iat[7, 0])
# Assert .iat for Series
self.assertEqual(test_series.iat[1], 6)
self.assertEqual(test_series.iat[1], pdf.loc[3].iat[1])
        # Assert an invalid column or integer position results in a KeyError like with pandas
with self.assertRaises(KeyError, msg=99):
kdf.iat[0, 99]
with self.assertRaises(KeyError, msg=99):
kdf.iat[99, 0]
with self.assertRaises(ValueError):
kdf.iat[(1, 1), 1]
with self.assertRaises(ValueError):
kdf.iat[1, (1, 1)]
# Assert setting values fails
with self.assertRaises(TypeError):
kdf.iat[4, 1] = 10
def test_iat_multiindex(self):
pdf = self.pdf.set_index("b", append=True)
kdf = self.kdf.set_index("b", append=True)
self.assert_eq(kdf.iat[7, 0], pdf.iat[7, 0])
with self.assertRaises(ValueError):
kdf.iat[3, "a"]
def test_iat_multiindex_columns(self):
arrays = [np.array(["bar", "bar", "baz", "baz"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.iat[1, 3], pdf.iat[1, 3])
with self.assertRaises(KeyError):
kdf.iat[0, 99]
with self.assertRaises(KeyError):
kdf.iat[99, 0]
def test_loc(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[3:8], pdf.loc[3:8])
self.assert_eq(kdf.loc[:8], pdf.loc[:8])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[[5]], pdf.loc[[5]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 8]], pdf.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 9]], pdf.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.loc[np.array([3, 4, 1, 9])], pdf.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[5:5], pdf.a.loc[5:5])
self.assert_eq(kdf.a.loc[3:8], pdf.a.loc[3:8])
self.assert_eq(kdf.a.loc[:8], pdf.a.loc[:8])
self.assert_eq(kdf.a.loc[3:], pdf.a.loc[3:])
self.assert_eq(kdf.a.loc[[5]], pdf.a.loc[[5]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 8]], pdf.a.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 9]], pdf.a.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.a.loc[np.array([3, 4, 1, 9])],
# pdf.a.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[[]], pdf.a.loc[[]])
self.assert_eq(kdf.a.loc[np.array([])], pdf.a.loc[np.array([])])
self.assert_eq(kdf.loc[1000:], pdf.loc[1000:])
self.assert_eq(kdf.loc[-2000:-1000], pdf.loc[-2000:-1000])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertRaises(KeyError, lambda: kdf.loc[10])
self.assertRaises(KeyError, lambda: kdf.a.loc[10])
# monotonically increasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[0, 1, 1, 2, 2, 2, 4, 5, 6])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:2], pdf.loc[:2])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[4:], pdf.loc[4:])
self.assert_eq(kdf.loc[3:2], pdf.loc[3:2])
self.assert_eq(kdf.loc[-1:2], pdf.loc[-1:2])
self.assert_eq(kdf.loc[3:10], pdf.loc[3:10])
# monotonically decreasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[6, 5, 5, 4, 4, 4, 2, 1, 0])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:4], pdf.loc[:4])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[2:], pdf.loc[2:])
self.assert_eq(kdf.loc[2:3], pdf.loc[2:3])
self.assert_eq(kdf.loc[2:-1], pdf.loc[2:-1])
self.assert_eq(kdf.loc[10:3], pdf.loc[10:3])
        # test when the index is of string type and the given value is not in the index
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=["a", "b", "d"])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["a":"z"], pdf.loc["a":"z"])
# KeyError when index is not monotonic increasing or decreasing
# and specified values don't exist in index
kdf = ks.DataFrame([[1, 2], [4, 5], [7, 8]], index=["cobra", "viper", "sidewinder"])
self.assertRaises(KeyError, lambda: kdf.loc["cobra":"koalas"])
self.assertRaises(KeyError, lambda: kdf.loc["koalas":"viper"])
kdf = ks.DataFrame([[1, 2], [4, 5], [7, 8]], index=[10, 30, 20])
self.assertRaises(KeyError, lambda: kdf.loc[0:30])
self.assertRaises(KeyError, lambda: kdf.loc[10:100])
def test_loc_non_informative_index(self):
pdf = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 30, 40])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[20:30], pdf.loc[20:30])
pdf = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 20, 40])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[20:20], pdf.loc[20:20])
def test_loc_with_series(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[kdf.a % 2 == 0], pdf.loc[pdf.a % 2 == 0])
self.assert_eq(kdf.loc[kdf.a % 2 == 0, "a"], pdf.loc[pdf.a % 2 == 0, "a"])
self.assert_eq(kdf.loc[kdf.a % 2 == 0, ["a"]], pdf.loc[pdf.a % 2 == 0, ["a"]])
self.assert_eq(kdf.a.loc[kdf.a % 2 == 0], pdf.a.loc[pdf.a % 2 == 0])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0], pdf.loc[pdf.copy().a % 2 == 0])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0, "a"], pdf.loc[pdf.copy().a % 2 == 0, "a"])
self.assert_eq(kdf.loc[kdf.copy().a % 2 == 0, ["a"]], pdf.loc[pdf.copy().a % 2 == 0, ["a"]])
self.assert_eq(kdf.a.loc[kdf.copy().a % 2 == 0], pdf.a.loc[pdf.copy().a % 2 == 0])
def test_loc_noindex(self):
kdf = self.kdf
kdf = kdf.reset_index()
pdf = self.pdf
pdf = pdf.reset_index()
self.assert_eq(kdf[["a"]], pdf[["a"]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
def test_loc_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index("b", append=True)
pdf = self.pdf
pdf = pdf.set_index("b", append=True)
self.assert_eq(kdf.loc[:], pdf.loc[:])
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[5:9], pdf.loc[5:9])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
# TODO: self.assert_eq(kdf.loc[(5, 3)], pdf.loc[(5, 3)])
# TODO: self.assert_eq(kdf.loc[(9, 0)], pdf.loc[(9, 0)])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertTrue((kdf.a.loc[(5, 3)] == pdf.a.loc[(5, 3)]).all())
self.assert_eq(kdf.a.loc[(9, 0)], pdf.a.loc[(9, 0)])
# monotonically increasing index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("y", "d"), ("z", "e")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice(None),
slice("y", None),
slice(None, "y"),
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically increasing", rows_sel=rows_sel):
self.assert_eq(kdf.loc[rows_sel], pdf.loc[rows_sel])
self.assert_eq(kdf.a.loc[rows_sel], pdf.a.loc[rows_sel])
# monotonically increasing first index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c"), ("y", "a"), ("z", "e")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice(None),
slice("y", None),
slice(None, "y"),
]:
with self.subTest("monotonically increasing first index", rows_sel=rows_sel):
self.assert_eq(kdf.loc[rows_sel], pdf.loc[rows_sel])
self.assert_eq(kdf.a.loc[rows_sel], pdf.a.loc[rows_sel])
for rows_sel in [
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically increasing first index", rows_sel=rows_sel):
self.assertRaises(KeyError, lambda: kdf.loc[rows_sel])
self.assertRaises(KeyError, lambda: kdf.a.loc[rows_sel])
# not monotonically increasing index test
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5]},
index=pd.MultiIndex.from_tuples(
[("z", "e"), ("y", "d"), ("y", "c"), ("x", "b"), ("x", "a")]
),
)
kdf = ks.from_pandas(pdf)
for rows_sel in [
slice("y", None),
slice(None, "y"),
slice(("x", "b"), None),
slice(None, ("y", "c")),
slice(("x", "b"), ("y", "c")),
slice("x", ("y", "c")),
slice(("x", "b"), "y"),
]:
with self.subTest("monotonically decreasing", rows_sel=rows_sel):
self.assertRaises(KeyError, lambda: kdf.loc[rows_sel])
self.assertRaises(KeyError, lambda: kdf.a.loc[rows_sel])
def test_loc2d_multiindex(self):
kdf = self.kdf
kdf = kdf.set_index("b", append=True)
pdf = self.pdf
pdf = pdf.set_index("b", append=True)
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[:, "a"], pdf.loc[:, "a"])
self.assert_eq(kdf.loc[5:5, "a"], pdf.loc[5:5, "a"])
self.assert_eq(kdf.loc[:, "a":"a"], pdf.loc[:, "a":"a"])
self.assert_eq(kdf.loc[:, "a":"c"], pdf.loc[:, "a":"c"])
self.assert_eq(kdf.loc[:, "b":"c"], pdf.loc[:, "b":"c"])
def test_loc2d(self):
kdf = self.kdf
pdf = self.pdf
# index indexer is always regarded as slice for duplicated values
self.assert_eq(kdf.loc[5:5, "a"], pdf.loc[5:5, "a"])
self.assert_eq(kdf.loc[[5], "a"], pdf.loc[[5], "a"])
self.assert_eq(kdf.loc[5:5, ["a"]], pdf.loc[5:5, ["a"]])
self.assert_eq(kdf.loc[[5], ["a"]], pdf.loc[[5], ["a"]])
self.assert_eq(kdf.loc[:, :], pdf.loc[:, :])
self.assert_eq(kdf.loc[3:8, "a"], pdf.loc[3:8, "a"])
self.assert_eq(kdf.loc[:8, "a"], pdf.loc[:8, "a"])
self.assert_eq(kdf.loc[3:, "a"], pdf.loc[3:, "a"])
self.assert_eq(kdf.loc[[8], "a"], pdf.loc[[8], "a"])
self.assert_eq(kdf.loc[3:8, ["a"]], pdf.loc[3:8, ["a"]])
self.assert_eq(kdf.loc[:8, ["a"]], pdf.loc[:8, ["a"]])
self.assert_eq(kdf.loc[3:, ["a"]], pdf.loc[3:, ["a"]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 3], ['a']], pdf.loc[[3, 4, 3], ['a']])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[3, 3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[3:, 3])
self.assertRaises(SparkPandasIndexingError, lambda: kdf.a.loc[kdf.a % 2 == 0, 3])
self.assert_eq(kdf.loc[5, "a"], pdf.loc[5, "a"])
self.assert_eq(kdf.loc[9, "a"], pdf.loc[9, "a"])
self.assert_eq(kdf.loc[5, ["a"]], pdf.loc[5, ["a"]])
self.assert_eq(kdf.loc[9, ["a"]], pdf.loc[9, ["a"]])
self.assert_eq(kdf.loc[:, "a":"a"], pdf.loc[:, "a":"a"])
self.assert_eq(kdf.loc[:, "a":"d"], pdf.loc[:, "a":"d"])
self.assert_eq(kdf.loc[:, "c":"d"], pdf.loc[:, "c":"d"])
# bool list-like column select
bool_list = [True, False]
self.assert_eq(kdf.loc[:, bool_list], pdf.loc[:, bool_list])
self.assert_eq(kdf.loc[:, np.array(bool_list)], pdf.loc[:, np.array(bool_list)])
pser = pd.Series(bool_list, index=pdf.columns)
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
pser = pd.Series(list(reversed(bool_list)), index=list(reversed(pdf.columns)))
self.assert_eq(kdf.loc[:, pser], pdf.loc[:, pser])
self.assertRaises(IndexError, lambda: kdf.loc[:, bool_list[:-1]])
self.assertRaises(IndexError, lambda: kdf.loc[:, np.array(bool_list + [True])])
        self.assertRaises(SparkPandasIndexingError, lambda: kdf.loc[:, pd.Series(bool_list)])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Monday 3 December 2018
@author: <NAME>
"""
import os
import pandas as pd
import numpy as np
import feather
import time
from datetime import date
import sys
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import normalize
import somoclu
from delprocess.loadprofiles import resampleProfiles
from .metrics import mean_index_adequacy, davies_bouldin_score
from ..support import cluster_dir, results_dir
def progress(n, stats):
"""Report progress information, return a string."""
s = "%s : " % (n)
s += "\nsilhouette: %(silhouette).3f " % stats
s += "\ndbi: %(dbi).3f " % stats
s += "\nmia: %(mia).3f " % stats
return print(s)
def clusterStats(cluster_stats, n, X, cluster_labels, preprocessing, transform, tic, toc):
stats = {'n_sample': 0,
'cluster_size': [],
'silhouette': 0.0,
'dbi': 0.0,
'mia': 0.0,
'all_scores': 0.0,
# 'cdi': 0.0,
't0': time.time(),
'batch_fit_time': 0.0,
'total_sample': 0}
cluster_stats[n] = stats
try:
cluster_stats[n]['total_sample'] += X.shape[0]
cluster_stats[n]['n_sample'] = X.shape[0]
cluster_stats[n]['silhouette'] = silhouette_score(X, cluster_labels, sample_size=10000)
cluster_stats[n]['dbi'] = davies_bouldin_score(X, cluster_labels)
cluster_stats[n]['mia'] = mean_index_adequacy(X, cluster_labels)
        #cluster_stats[n_clusters][y]['cdi'] = cluster_dispersion_index(Xbatch, cluster_labels) DON'T RUN LOCALLY!! - need to change to chunked algorithm once released
cluster_stats[n]['cluster_size'] = np.bincount(cluster_labels)
cluster_stats[n]['batch_fit_time'] = toc - tic
cluster_stats[n]['preprocessing'] = preprocessing
cluster_stats[n]['transform'] = transform
cluster_stats[n]['all_scores'] = cluster_stats[n]['dbi']*cluster_stats[n]['mia']/cluster_stats[n]['silhouette']
s = "%s : " % (n)
s += "\nsilhouette: %(silhouette).3f " % stats
s += "\ndbi: %(dbi).3f " % stats
s += "\nmia: %(mia).3f " % stats
print(s)
except:
print('Could not compute clustering stats for n = ' + str(n))
pass
return cluster_stats
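# Sketch of the composite score computed in clusterStats() above: DBI and MIA
# are "lower is better" metrics while silhouette is "higher is better", so
# all_scores = dbi * mia / silhouette shrinks when all three agree that the
# clustering is good. The numbers below are made up purely for illustration.
def _all_scores_example():
    dbi, mia, silhouette = 0.9, 1.4, 0.35  # hypothetical metric values
    return dbi * mia / silhouette          # == 3.6; smaller is better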
def saveResults(experiment_name, cluster_stats, cluster_centroids, som_dim, elec_bin, save=True):
"""
Saves cluster stats results and centroids for a single clustering iteration.
Called inside kmeans() and som() functions.
"""
for k, v in cluster_stats.items():
n = k
evals = pd.DataFrame(cluster_stats).T
evals['experiment_name'] = experiment_name
evals['som_dim'] = som_dim
evals['n_clust'] = n
evals['elec_bin'] = elec_bin
eval_results = evals.drop(labels='cluster_size', axis=1).reset_index(drop=True)
# eval_results.rename({'index':'k'}, axis=1, inplace=True)
eval_results[['dbi','mia','silhouette']] = eval_results[['dbi','mia','silhouette']].astype(float)
eval_results['date'] = date.today().isoformat()
# eval_results['best_clusters'] = None
centroid_results = pd.DataFrame(cluster_centroids)
centroid_results['experiment_name'] = experiment_name
centroid_results['som_dim'] = som_dim
centroid_results['n_clust'] = n
centroid_results['elec_bin'] = elec_bin
try:
centroid_results['cluster_size'] = evals['cluster_size'][n]
except:
centroid_results['cluster_size'] = np.nan
centroid_results.reset_index(inplace=True)
centroid_results.rename({'index':'k'}, axis=1, inplace=True)
centroid_results['date'] = date.today().isoformat()
#3 Save Results
if save is True:
os.makedirs(results_dir, exist_ok=True)
erpath = os.path.join(results_dir, 'cluster_results.csv')
if os.path.isfile(erpath):
eval_results.to_csv(erpath, mode='a', index=False, header=False)
else:
eval_results.to_csv(erpath, index=False)
os.makedirs(cluster_dir, exist_ok=True)
crpath = os.path.join(cluster_dir, experiment_name + '_centroids.csv')
if os.path.isfile(crpath):
centroid_results.to_csv(crpath, mode='a', index=False, header=False)
else:
centroid_results.to_csv(crpath, index=False)
print('Results saved for', experiment_name, str(som_dim), str(n))
return eval_results, centroid_results
def xBins(X, bin_type):
if bin_type == 'amd':
Xdd_A = X.sum(axis=1)
Xdd = Xdd_A*230/1000
XmonthlyPower = resampleProfiles(Xdd, interval='M', aggfunc='sum')
Xamd = resampleProfiles(XmonthlyPower, interval='A', aggfunc='mean').reset_index().groupby('ProfileID').mean()
Xamd.columns=['amd']
amd_bins = [0, 1, 50, 150, 400, 600, 1200, 2500, 4000]
bin_labels = ['{0:.0f}-{1:.0f}'.format(x,y) for x, y in zip(amd_bins[:-1], amd_bins[1:])]
Xamd['bins'] = pd.cut(Xamd.amd, amd_bins, labels=bin_labels, right=True, include_lowest=True)
Xbin_dict = dict()
for c in Xamd.bins.cat.categories:
Xbin_dict[c] = Xamd[Xamd.bins==c].index.values
del Xdd_A, Xdd, XmonthlyPower, Xamd
if bin_type == 'integral':
Xint = normalize(X).cumsum(axis=1)
Xintn = pd.DataFrame(Xint, index=X.index)
Xintn['max'] = X.max(axis=1)
clusterer = MiniBatchKMeans(n_clusters=8, random_state=10)
clusterer.fit(np.array(Xintn))
cluster_labels = clusterer.predict(np.array(Xintn))
labl = pd.DataFrame(cluster_labels, index=X.index)
Xbin_dict = dict()
for c in labl[0].unique():
Xbin_dict['bin'+str(c)] = labl[labl[0]==c].index.values
return Xbin_dict
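# Illustration of the 'amd' branch of xBins() above: pd.cut assigns each
# profile's annual mean demand to a labelled range, and the returned dict maps
# each bin label to the ProfileIDs falling inside it. The demand values and
# profile names here are hypothetical.
def _amd_bin_example():
    amd = pd.Series([0.5, 30, 220, 900], index=['p1', 'p2', 'p3', 'p4'], name='amd')
    amd_bins = [0, 1, 50, 150, 400, 600, 1200, 2500, 4000]
    bin_labels = ['{0:.0f}-{1:.0f}'.format(x, y) for x, y in zip(amd_bins[:-1], amd_bins[1:])]
    bins = pd.cut(amd, amd_bins, labels=bin_labels, right=True, include_lowest=True)
    return {c: amd.index[bins == c].values for c in bins.cat.categories}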
def preprocessX(X, norm=None):
if norm == 'unit_norm': #Kwac et al 2013
Xnorm = normalize(X)
elif norm == 'zero-one': #Dent et al 2014
Xnorm = np.array(X.divide(X.max(axis=1), axis=0))
elif norm == 'demin': #Jin et al 2016
Xnorm = normalize(X.subtract(X.min(axis=1), axis=0))
elif norm == 'sa_norm': #Dekenah 2014
Xnorm = np.array(X.divide(X.mean(axis=1), axis=0))
else:
Xnorm = np.array(X)
#Xnorm.fillna(0, inplace=True)
Xnorm[np.isnan(Xnorm)] = 0
return Xnorm
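# Quick illustration of the normalisation options handled by preprocessX() on
# a tiny profile matrix: 'unit_norm' scales each row to unit L2 norm,
# 'zero-one' divides by the row maximum, 'demin' subtracts the row minimum
# before unit-norming, and 'sa_norm' divides by the row mean. The input values
# are arbitrary.
def _preprocess_example():
    X = pd.DataFrame([[1.0, 2.0, 3.0],
                      [0.0, 5.0, 5.0]])
    return {norm: preprocessX(X, norm=norm)
            for norm in ['unit_norm', 'zero-one', 'demin', 'sa_norm', None]}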
def kmeans(X, range_n_clusters, top_lbls=10, preprocessing = None, bin_X=False, experiment_name=None):
"""
    This function applies the MiniBatchKMeans algorithm from sklearn to inputs X for each value in range_n_clusters.
    If preprocessing is specified, X is normalised in preprocessX() using the given scheme.
Returns cluster stats, cluster centroids and cluster labels.
"""
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'all':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
centroids = pd.DataFrame()
stats = pd.DataFrame()
cluster_lbls = pd.DataFrame()
        dim = 0  # set dim to 0 to match SOM formatting
cluster_lbls_dim = {}
stats_dim = pd.DataFrame()
for n_clust in range_n_clusters:
clusterer = MiniBatchKMeans(n_clusters=n_clust, random_state=10)
#train clustering algorithm
tic = time.time()
clusterer.fit(A)
cluster_labels = clusterer.predict(A)
toc = time.time()
## Calculate scores
cluster_stats = clusterStats({}, n_clust, A, cluster_labels,
preprocessing = preprocessing, transform = None,
tic = tic, toc = toc)
cluster_centroids = clusterer.cluster_centers_
eval_results, centroid_results = saveResults(experiment_name, cluster_stats,
cluster_centroids, dim, b, save)
stats_dim = stats_dim.append(eval_results)
centroids = centroids.append(centroid_results)
cluster_lbls_dim[n_clust] = cluster_labels
#outside n_clust loop
best_clusters, best_stats = bestClusters(cluster_lbls_dim, stats_dim, top_lbls)
cluster_lbls = pd.concat([cluster_lbls, best_clusters], axis=1)
stats = pd.concat([stats, best_stats], axis=0)
stats.reset_index(drop=True, inplace=True)
if save is True:
saveLabels(cluster_lbls, stats)
return stats, centroids, cluster_lbls
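# Hedged usage sketch for kmeans() above. X is assumed to be a DataFrame of
# load profiles indexed by ProfileID with one column per time interval.
# experiment_name is left as None so nothing is written to disk; the cluster
# range and preprocessing choice are illustrative, not recommendations.
def _kmeans_example(X):
    stats, centroids, labels = kmeans(
        X, range_n_clusters=range(8, 21, 4), top_lbls=5,
        preprocessing='unit_norm', bin_X=False, experiment_name=None)
    return stats, centroids, labels  # per-k scores, centroids and top cluster labels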
def som(X, range_n_dim, top_lbls=10, preprocessing = None, bin_X=False, transform=None, experiment_name=None, **kwargs):
"""
    This function applies the self-organising map algorithm from somoclu to inputs X over square maps whose side lengths are given by range_n_dim.
    If preprocessing is specified, X is normalised in preprocessX() using the given scheme.
    If kmeans = True, the KMeans algorithm from sklearn is applied to the SOM nodes to return clusters.
kwargs can be n_clusters = range(start, end, interval) OR list()
Returns cluster stats, cluster centroids and cluster labels.
"""
for dim in range_n_dim:
limit = int(np.sqrt(len(X)/20))
if dim > limit: #verify that number of nodes are sensible for size of input data
return print('Input size too small for map. Largest n should be ' + str(limit))
else:
pass
if experiment_name is None:
save = False
else:
if preprocessing is None:
pass
else:
experiment_name = experiment_name+'_'+ preprocessing
save = True
#apply pre-binning
if bin_X != False:
Xbin = xBins(X, bin_X)
else:
Xbin = {'0-4000':X}
for b, ids in Xbin.items():
try:
A = X.loc[ids,:]
except:
A = ids
#apply preprocessing
A = preprocessX(A, norm=preprocessing)
        centroids = pd.DataFrame()
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import calendar
SEC_PER_DAY = 86400
MM_PER_M = 1000
WB_COMPONENTS = ['scalarTotalRunoff', 'scalarGroundEvaporation', 'pptrate',
'scalarCanopyEvaporation', 'scalarCanopyTranspiration',
'scalarSnowSublimation', 'scalarCanopySublimation',
'scalarSWE', 'scalarTotalSoilWat', 'scalarCanopyWat']
WB_LONGNAMES = ['Evapotranspiration (ET)', 'Runoff', 'Precipitation',
'Soil & canopy moisture',
'Snow water equivalent (SWE)', 'Baseflow']
def _determine_suffix(ds):
suffix = ''
for var in list(ds.keys()):
if var.startswith(WB_COMPONENTS[0]):
if var == WB_COMPONENTS[0]:
return suffix
else:
return var.replace(WB_COMPONENTS[0], '_')
def aggregate_wb_vars(ds):
out_vars = ['evaporation', 'precipitation', 'runoff',
'swe', 'soil_moisture', 'baseflow'] # , 'canopy_moisture']
suffix = _determine_suffix(ds)
ds = ds.where(ds['scalarTotalRunoff{}'.format(suffix)] > -100, drop=True)
ds['precipitation'] = ds['pptrate{}'.format(suffix)] * SEC_PER_DAY
ds['evaporation'] = - SEC_PER_DAY * (
ds['scalarGroundEvaporation{}'.format(suffix)]
+ ds['scalarCanopyEvaporation{}'.format(suffix)]
+ ds['scalarCanopyTranspiration{}'.format(suffix)]
+ ds['scalarSnowSublimation{}'.format(suffix)]
+ ds['scalarCanopySublimation{}'.format(suffix)])
ds['runoff'] = (ds['scalarTotalRunoff{}'.format(suffix)]
* SEC_PER_DAY * MM_PER_M)
ds['baseflow'] = SEC_PER_DAY * MM_PER_M * (
ds['scalarAquiferBaseflow{}'.format(suffix)])
ds['swe'] = ds['scalarSWE{}'.format(suffix)]
ds['soil_moisture'] = (ds['scalarTotalSoilLiq{}'.format(suffix)]
+ ds['scalarTotalSoilIce{}'.format(suffix)]
+ ds['scalarCanopyIce{}'.format(suffix)]
+ ds['scalarCanopyLiq{}'.format(suffix)])
return ds[out_vars]
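# The unit conversions above, spelled out: assuming SUMMA's usual output units,
# pptrate and the evaporation terms are rates in kg m-2 s-1 (numerically mm of
# water per second), so multiplying by SEC_PER_DAY gives mm/day, while
# scalarTotalRunoff and aquifer baseflow are in m s-1 and need the extra
# MM_PER_M factor.
def _runoff_to_mm_per_day(runoff_m_per_s):
    return runoff_m_per_s * SEC_PER_DAY * MM_PER_M  # e.g. 1e-8 m/s -> 0.864 mm/day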
def calc_monthly_flux(da: xr.DataArray, year: int) -> xr.DataArray:
"""Calculates monthly change in a data array for a given year"""
feb_end = 29 if calendar.isleap(year) else 28
start = [f'9-30-{year-1}', f'10-31-{year-1}', f'11-30-{year-1}',
f'12-31-{year-1}', f'01-31-{year}', f'02-{feb_end}-{year}',
f'03-31-{year}', f'04-30-{year}', f'05-31-{year}',
f'06-30-{year}', f'07-31-{year}', f'08-31-{year}']
end = [f'10-31-{year-1}', f'11-30-{year-1}', f'12-31-{year-1}',
f'01-31-{year}', f'02-{feb_end}-{year}', f'03-31-{year}',
f'04-30-{year}', f'05-31-{year}', f'06-30-{year}',
f'07-31-{year}', f'08-31-{year}', f'09-30-{year}']
da = da.copy(deep=True).resample(time='D').mean()
return np.array([da.sel(time=e).values - da.sel(time=s).values
for s, e in zip(start, end)])
def calc_monthly_sum(da: xr.DataArray, year: int) -> xr.DataArray:
"""Calculates monthly change in a data array for a given year"""
feb_end = 29 if calendar.isleap(year) else 28
start = [f'10-01-{year-1}', f'11-01-{year-1}', f'12-01-{year-1}',
f'01-01-{year}', f'02-01-{year}', f'03-01-{year}',
f'04-01-{year}', f'05-01-{year}', f'06-01-{year}',
f'07-01-{year}', f'08-01-{year}', f'09-01-{year}']
end = [f'10-31-{year-1}', f'11-30-{year-1}', f'12-31-{year-1}',
f'01-31-{year}', f'02-{feb_end}-{year}', f'03-31-{year}',
f'04-30-{year}', f'05-31-{year}', f'06-30-{year}',
f'07-31-{year}', f'08-31-{year}', f'09-30-{year}']
da = da.copy(deep=True).resample(time='D').mean()
return np.array([da.sel(time=slice(s, e)).sum(dim='time')
for s, e in zip(start, end)])
def monthly_water_balance(ds: xr.Dataset, year: int,
agg_dims: list=None) -> pd.DataFrame:
wb_vars = ['evaporation', 'runoff', 'precipitation',
'soil_moisture', 'swe', 'baseflow']
wy_slice = slice(f'10-01-{year-1}', f'9-30-{year}')
time_group = ds.sel(time=wy_slice).time.dt.month
wb_monthly = ds.sel(time=wy_slice).groupby(time_group).sum(dim=['time'])
if agg_dims is not None:
wb_monthly = wb_monthly[wb_vars].sum(dim=agg_dims)
else:
wb_monthly = wb_monthly[wb_vars]
wb_monthly['swe'].values = calc_monthly_flux(ds['swe'], year)
wb_monthly['soil_moisture'].values = (
calc_monthly_flux(ds['soil_moisture'], year))
wb_monthly['evaporation'].values = (
calc_monthly_sum(ds['evaporation'], year))
wb_monthly['runoff'].values = calc_monthly_sum(ds['runoff'], year)
wb_monthly['baseflow'].values = calc_monthly_sum(ds['baseflow'], year)
wb_monthly['precipitation'].values = (
calc_monthly_sum(ds['precipitation'], year))
wb_df = wb_monthly.to_dataframe()
wb_df.index -= 1
return wb_df
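# Hedged usage sketch: aggregate the raw SUMMA output once, then pull a monthly
# water-balance table for a single water year. The 'hru' aggregation dimension
# matches what water_balance() further below passes for the seasonal case; swap
# it for whatever spatial dimension your output actually carries.
def _monthly_wb_example(ds_raw):
    ds_agg = aggregate_wb_vars(ds_raw)
    wb = monthly_water_balance(ds_agg, year=2000, agg_dims=['hru'])
    return wb[['precipitation', 'evaporation', 'runoff', 'baseflow']]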
def calc_seasonal_flux(da: xr.DataArray, year: int) -> xr.DataArray:
"""Calculates seasonal change in a data array for a given year"""
feb_end = 29 if calendar.isleap(year) else 28
start = [f'12-01-{year-1}', f'03-01-{year}',
f'06-01-{year}', f'08-31-{year}']
end = [f'02-{feb_end}-{year}', f'05-31-{year}',
f'08-31-{year}', f'11-30-{year}']
da = da.copy(deep=True).resample(time='D').mean()
return np.array([da.sel(time=e).values - da.sel(time=s).values
for s, e in zip(start, end)])
def calc_seasonal_sum(da: xr.DataArray, year: int) -> xr.DataArray:
"""Calculates seasonal change in a data array for a given year"""
feb_end = 29 if calendar.isleap(year) else 28
start = [f'12-01-{year-1}', f'03-01-{year}',
f'06-01-{year}', f'09-01-{year}']
end = [f'02-{feb_end}-{year}', f'05-31-{year}',
f'08-31-{year}', f'11-30-{year}']
da = da.copy(deep=True).resample(time='D').sum()
return np.array([da.sel(time=slice(s, e)).sum(dim='time')
for s, e in zip(start, end)])
def seasonal_water_balance(ds: xr.Dataset, year: int,
agg_dims: list=None) -> pd.DataFrame:
wb_vars = ['evaporation', 'runoff', 'precipitation',
'soil_moisture', 'swe', 'baseflow']
wy_slice = slice(f'11-30-{year-1}', f'12-31-{year}')
time_group = ds.sel(time=wy_slice).time.dt.season
wb_seasonal = ds.sel(time=wy_slice).groupby(time_group).sum(dim=['time'])
if agg_dims is not None:
wb_seasonal = wb_seasonal[wb_vars].sum(dim=agg_dims)
else:
wb_seasonal = wb_seasonal[wb_vars]
wb_seasonal['swe'].values = calc_seasonal_flux(ds['swe'], year)
wb_seasonal['soil_moisture'].values = (
calc_seasonal_flux(ds['soil_moisture'], year))
wb_seasonal['soil_moisture'].values += (
calc_seasonal_sum(ds['evaporation'], year))
wb_seasonal['runoff'].values = calc_seasonal_sum(ds['runoff'], year)
wb_seasonal['baseflow'].values = calc_seasonal_sum(ds['baseflow'], year)
wb_seasonal['precipitation'].values = (
calc_seasonal_sum(ds['precipitation'], year))
wb_df = wb_seasonal.to_dataframe()
return wb_df
def water_balance(ds, start_year, end_year, how='seasonal',
ax=None, legend=True):
if how not in ['seasonal', 'monthly']:
raise NotImplementedError()
s_df = []
ds_agg = aggregate_wb_vars(ds)
if how == 'seasonal':
for year in np.arange(start_year, end_year):
s_df.append(seasonal_water_balance(ds_agg, year, agg_dims=['hru']))
        s_df = pd.concat(s_df)
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
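# A small standalone sketch of the behaviour exercised above: with
# format="table", HDFStore accepts column selection either through the
# columns= argument or as a "columns=[...]" expression inside where=. The
# temporary path handling here is illustrative only.
def _columns_in_where_example(tmp_path):
    df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"])
    with HDFStore(tmp_path / "demo.h5") as store:
        store.put("df", df, format="table")
        return store.select("df", where="columns=['A', 'C']")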
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
msg = "cannot use an invert condition when passing to numexpr"
with pytest.raises(NotImplementedError, match=msg):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
from MovieRecommender import train_test_model
import pandas as pd
import numpy as np
import sys
from scipy.sparse import csr_matrix, load_npz
import pickle
from tabulate import tabulate
def get_movies_rated(data, user_id, train_data, movies):
data_matrix = data.loc[data.rating != 0]
users = list(np.sort(data_matrix.user_id.unique())) # Get unique users
items = list(np.sort(data_matrix.item_id.unique())) # Get unique movies
users_arr = np.array(users) # Array of user IDs from the ratings matrix
items_arr = np.array(items) # Array of movie IDs from the ratings matrix
# Returns index row of user id
user_ind = np.where(users_arr == user_id)[0][0]
# Get column indices of rated items
rating_ind = train_data[user_ind, :].nonzero()[1]
movie_codes = items_arr[rating_ind] # Get the movie ids for rated items
return movies.loc[movies['item_id'].isin(movie_codes),
'name'].reset_index(drop=True)
def predict_ratings(predictions, item_vecs, user_id):
item_vecs = predictions[1]
user_vec = predictions[0][user_id, :]
pred = user_vec.dot(item_vecs).toarray()[0].reshape(-1)
return pred
def similar_items(movies, model, movie_list, n_similar=20):
# Use implicit to get similar items.
movies.name = movies.name.str.strip()
item_id = movies.item_id.loc[movies.name.str.lower().
isin([s.lower() for s in movie_list])].iloc[0]
movie_names = []
similar = model.similar_items(item_id, n_similar)
# Print the names of similar movies
for item in similar:
idx, rating = item
movie_names.append(movies.name.loc[movies.item_id == idx+1].iloc[0])
similar = pd.DataFrame({"Similar Movies": movie_names[1:]})
return similar
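# Illustrative usage (the title is a placeholder; any title present in movies.name works):
#   similar_items(movies, model, ["Toy Story"], n_similar=10)
# returns a one-column "Similar Movies" DataFrame with the nine closest titles, since the
# first hit is the query movie itself and is dropped via movie_names[1:].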
def recommendations(data, train_data, movies, model,
sparse_user_item, user_id):
# Use the implicit recommender.
recommended = model.recommend(user_id, sparse_user_item)
movies_recom = []
# ratings_recom = []
# Get movie names from ids
for item in recommended:
idx, rating = item
movies_recom.append((movies.name.loc[movies.item_id == idx+1].iloc[0]))
# ratings_recom.append(rating)
# Create a dataframe of movie names and scores
# recommendations = pd.DataFrame({'Movies': movies_recom,
# 'Rating': ratings_recom})
movies_rated_by_users = get_movies_rated(data, user_id, train_data, movies)
minlen = min(len(movies_recom), len(movies_rated_by_users))
recommendations = pd.DataFrame({'Recommended Movies':
movies_recom[:minlen],
'Movies Rated':
movies_rated_by_users[:minlen]})
return recommendations
def main():
train_test_model.main()
movies = pd.read_pickle("./output/movies.pkl")
from pyomo.environ import value
from watertap3.utils import watertap_setup, get_case_study, run_watertap3, run_model, get_results_table
import pandas as pd
import numpy as np
__all__ = ['run_sensitivity', 'run_sensitivity_power', 'get_fixed_onm_reduction']
def run_sensitivity(m=None, save_results=False, return_results=False, scenario=None, case_study=None, tds_only=False):
ro_list = ['reverse_osmosis', 'ro_first_pass', 'ro_a1', 'ro_b1',
'ro_active', 'ro_restore', 'ro_first_stage']
sens_df = pd.DataFrame()
import os,sys
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import pygeos
import pyproj
from osgeo import gdal
from tqdm import tqdm
import igraph as ig
import contextily as ctx
from rasterstats import zonal_stats
import time
import pylab as pl
from IPython import display
import seaborn as sns
import subprocess
import shutil
from multiprocessing import Pool,cpu_count
import pathlib
code_path = (pathlib.Path(__file__).parent.absolute())
gdal.SetConfigOption("OSM_CONFIG_FILE", os.path.join(code_path,'..','..',"osmconf.ini"))
from shapely.wkb import loads
data_path = os.path.join('..','data')
from simplify import *
from extract import railway,ferries,mainRoads,roads
from population_OD import create_bbox,create_grid
pd.options.mode.chained_assignment = None
def closest_node(node, nodes):
"""[summary]
Args:
node ([type]): [description]
nodes ([type]): [description]
Returns:
[type]: [description]
"""
dist_2 = np.sum((nodes - node)**2, axis=1)
return np.argmin(dist_2)
def load_network(osm_path,mainroad=True):
"""[summary]
Args:
osm_path ([type]): [description]
mainroad (bool, optional): [description]. Defaults to True.
Returns:
[type]: [description]
"""
if mainroad:
df = mainRoads(osm_path)
else:
df = roads(osm_path)
net = Network(edges=df)
net = clean_roundabouts(net)
net = split_edges_at_nodes(net)
net = add_endpoints(net)
net = add_ids(net)
net = add_topology(net)
net = drop_hanging_nodes(net)
net = merge_edges(net)
net = reset_ids(net)
net = add_distances(net)
net = merge_multilinestrings(net)
net = fill_attributes(net)
net = add_travel_time(net)
return net
def make_directed(edges):
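# Expand the edge table into directed form: one-way edges are kept as they are, while
# two-way edges are emitted once per direction with the lane count split between the two.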
save_edges = []
for ind,edge in edges.iterrows():
if edge.oneway == 'yes':
save_edges.append(edge)
else:
edge.oneway = 'yes'
edge.lanes = np.round(edge.lanes/2,0)
save_edges.append(edge)
edge2 = edge.copy()
from_id = edge.from_id
to_id = edge.to_id
edge2.from_id = to_id
edge2.to_id = from_id
save_edges.append(edge2)
new_edges = pd.DataFrame(save_edges).reset_index(drop=True)
new_edges.id = new_edges.index
return new_edges
def get_gdp_values(gdf,data_path):
"""[summary]
Args:
gdf ([type]): [description]
Returns:
[type]: [description]
"""
world_pop = os.path.join(data_path,'global_gdp','GDP_2015.tif')
gdf['geometry'] = gdf.geometry.apply(lambda x: loads(pygeos.to_wkb(x)))
gdp = list(item['sum'] for item in zonal_stats(gdf.geometry,world_pop,
stats="sum"))
gdp = [x if x is not None else 0 for x in gdp]
gdf['geometry'] = pygeos.from_shapely(gdf.geometry)
return gdp
def country_grid_gdp_filled(trans_network,country,data_path,rough_grid_split=100,from_main_graph=False):
"""[summary]
Args:
trans_network ([type]): [description]
rough_grid_split (int, optional): [description]. Defaults to 100.
Returns:
[type]: [description]
"""
if from_main_graph==True:
node_df = trans_network.copy()
envelop = pygeos.envelope(pygeos.multilinestrings(node_df.geometry.values))
height = np.sqrt(pygeos.area(envelop)/rough_grid_split)
else:
node_df = trans_network.nodes.copy()
node_df.geometry,approximate_crs = convert_crs(node_df)
envelop = pygeos.envelope(pygeos.multilinestrings(node_df.geometry.values))
height = np.sqrt(pygeos.area(envelop)/rough_grid_split)
gdf_admin = pd.DataFrame(create_grid(create_bbox(node_df),height),columns=['geometry'])
#load data and convert to pygeos
country_shape = gpd.read_file(os.path.join(data_path,'GADM','gadm36_levels.gpkg'),layer=0)
country_shape = pd.DataFrame(country_shape.loc[country_shape.GID_0==country])
country_shape.geometry = pygeos.from_shapely(country_shape.geometry)
gdf_admin = pygeos.intersection(gdf_admin,country_shape.geometry)
gdf_admin = gdf_admin.loc[~pygeos.is_empty(gdf_admin.geometry)]
gdf_admin['centroid'] = pygeos.centroid(gdf_admin.geometry)
gdf_admin['km2'] = area(gdf_admin)
gdf_admin['gdp'] = get_gdp_values(gdf_admin,data_path)
gdf_admin = gdf_admin.loc[gdf_admin.gdp > 0].reset_index()
gdf_admin['gdp_area'] = gdf_admin.gdp/gdf_admin['km2']
return gdf_admin
def convert_crs(gdf,current_crs="epsg:4326"):
"""[summary]
Args:
gdf ([type]): [description]
Returns:
[type]: [description]
"""
if current_crs == "epsg:4326":
lat = pygeos.geometry.get_y(pygeos.centroid(gdf['geometry'].iloc[0]))
lon = pygeos.geometry.get_x(pygeos.centroid(gdf['geometry'].iloc[0]))
# formula below based on :https://gis.stackexchange.com/a/190209/80697
approximate_crs = "epsg:" + str(int(32700-np.round((45+lat)/90,0)*100+np.round((183+lon)/6,0)))
else:
approximate_crs = "epsg:4326"
#from pygeos/issues/95
geometries = gdf['geometry']
coords = pygeos.get_coordinates(geometries)
transformer=pyproj.Transformer.from_crs(current_crs, approximate_crs,always_xy=True)
new_coords = transformer.transform(coords[:, 0], coords[:, 1])
result = pygeos.set_coordinates(geometries.copy(), np.array(new_coords).T)
return result,approximate_crs
def area(gdf,km=True):
"""[summary]
Args:
gdf ([type]): [description]
km (bool, optional): [description]. Defaults to True.
Returns:
[type]: [description]
"""
if km:
return pygeos.area(convert_crs(gdf)[0])/1e6
else:
return pygeos.area(convert_crs(gdf)[0])
def get_basetable(country,data_path):
io_data_path = os.path.join(data_path,'country_IO_tables')
df = pd.read_csv(os.path.join(io_data_path,'IO_{}_2015_BasicPrice.txt'.format(country)),
sep='\t', skiprows=1,header=[0,1,2],index_col = [0,1,2,3],
skipfooter=2617,engine='python')
basetable = df.iloc[:26,:26]
return basetable.astype(int)
def create_OD(gdf_admin,country_name,data_path):
"""[summary]
Args:
gdf_admin ([type]): [description]
country_name ([type]): [description]
Returns:
[type]: [description]
"""
# create list of sectors
sectors = [chr(i).upper() for i in range(ord('a'),ord('o')+1)]
# add a region column if not existing yet.
if 'NAME_1' not in gdf_admin.columns:
gdf_admin['NAME_1'] = ['reg'+str(x) for x in list(gdf_admin.index)]
# prepare paths to downscale a country. We give a country its own directory
# to allow for multiple unique countries running at the same time
downscale_basepath = os.path.join(code_path,'..','..','downscale_od')
downscale_countrypath = os.path.join(code_path,'..','..','run_downscale_od_{}'.format(country_name))
# copy downscaling method into the country directory
shutil.copytree(downscale_basepath,downscale_countrypath)
# save national IO table as basetable for downscaling
get_basetable(country_name,data_path).to_csv(os.path.join(downscale_countrypath,'basetable.csv'),
sep=',',header=False,index=False)
# create proxy table with GDP values per region/area
proxy_reg = pd.DataFrame(gdf_admin[['NAME_1','gdp_area']])
proxy_reg['year'] = 2016
proxy_reg = proxy_reg[['year','NAME_1','gdp_area']]
proxy_reg.columns = ['year','id','gdp_area']
proxy_reg.to_csv(os.path.join(downscale_countrypath,'proxy_reg.csv'),index=False)
indices = pd.DataFrame(sectors,columns=['sector'])
indices['name'] = country_name
indices = indices.reindex(['name','sector'],axis=1)
indices.to_csv(os.path.join(downscale_countrypath,'indices.csv'),index=False,header=False)
# prepare yaml file
yaml_file = open(os.path.join(downscale_countrypath,"settings_basic.yml"), "r")
list_of_lines = yaml_file.readlines()
list_of_lines[6] = ' - id: {}\n'.format(country_name)
list_of_lines[8] = ' into: [{}] \n'.format(','.join(['reg'+str(x) for x in list(gdf_admin.index)]))
yaml_file = open(os.path.join(downscale_countrypath,"settings_basic.yml"), "w")
yaml_file.writelines(list_of_lines)
yaml_file.close()
# run libmrio
p = subprocess.Popen([os.path.join(downscale_countrypath,'mrio_disaggregate'), 'settings_basic.yml'],
cwd=os.path.join(downscale_countrypath))
p.wait()
# create OD matrix from libmrio results
OD = pd.read_csv(os.path.join(downscale_countrypath,'output.csv'),header=None)
OD.columns = pd.MultiIndex.from_product([gdf_admin.NAME_1,sectors])
OD.index = pd.MultiIndex.from_product([gdf_admin.NAME_1,sectors])
OD = OD.groupby(level=0,axis=0).sum().groupby(level=0,axis=1).sum()
OD = (OD*5)/365
OD_dict = OD.stack().to_dict()
gdf_admin['import'] = list(OD.sum(axis=1))
gdf_admin['export'] = list(OD.sum(axis=0))
gdf_admin = gdf_admin.rename({'NAME_1': 'name'}, axis='columns')
# and remove country folder again to avoid clutter in the directory
shutil.rmtree(downscale_countrypath)
return OD,OD_dict,sectors,gdf_admin
def prepare_network_routing(transport_network):
"""[summary]
Args:
transport_network ([type]): [description]
Returns:
[type]: [description]
"""
gdf_roads = make_directed(transport_network.edges)
gdf_roads = gdf_roads.rename(columns={"highway": "infra_type"})
gdf_roads['GC'] = gdf_roads.apply(gc_function,axis=1)
gdf_roads['max_flow'] = gdf_roads.apply(set_max_flow,axis=1)
gdf_roads['flow'] = 0
gdf_roads['wait_time'] = 0
return gdf_roads
def create_graph(gdf_roads):
"""[summary]
Args:
gdf_roads ([type]): [description]
Returns:
[type]: [description]
"""
gdf_in = gdf_roads.reindex(['from_id','to_id'] + [x for x in list(gdf_roads.columns) if x not in ['from_id','to_id']],axis=1)
g = ig.Graph.TupleList(gdf_in.itertuples(index=False), edge_attrs=list(gdf_in.columns)[2:],directed=True)
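# Graph.TupleList reads the first two fields of each tuple (from_id, to_id) as the edge
# endpoints; the remaining columns are attached as edge attributes via edge_attrs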
sg = g.clusters().giant()
gdf_in.set_index('id',inplace=True)
return sg,gdf_in
def nearest_network_node_list(gdf_admin,gdf_nodes,sg):
"""[summary]
Args:
gdf_admin ([type]): [description]
gdf_nodes ([type]): [description]
sg ([type]): [description]
Returns:
[type]: [description]
"""
gdf_nodes = gdf_nodes.loc[gdf_nodes.id.isin(sg.vs['name'])]
gdf_nodes.reset_index(drop=True,inplace=True)
nodes = {}
for admin_ in gdf_admin.itertuples():
nodes[admin_.name] = gdf_nodes.iloc[pygeos.distance((admin_.centroid),gdf_nodes.geometry).idxmin()].id
return nodes
def set_max_flow(segment):
"""[summary]
Args:
segment ([type]): [description]
Returns:
[type]: [description]
"""
empty_trip_correction = 0.7  # empty trips reduce the capacity available for freight
# standard lane capacity = 1000 passenger vehicles per lane per hour
# trunk and motorway correct by factor 4
# primary correct by factor 2
# secondary correct by factor 1
# tertiary correct factor 0.5
# other roads correct factor 0.5
# passenger vehicle equvalent for trucks: 3.5
# average truck load: 8 tonnes
# 30 % of trips are empty
# median value per ton: 2,000 USD
# median truck value: 8*2000 = 16,000 USD
standard_max_flow = 1000/3.5*16000*empty_trip_correction
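# with the figures above this works out to roughly
# 1000 / 3.5 ≈ 286 trucks per lane-hour * 16,000 USD per truck * 0.7 ≈ 3.2 million USD per lane-hour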
if (segment.infra_type == 'trunk') | (segment.infra_type == 'trunk_link'):
return standard_max_flow*4
elif (segment.infra_type == 'motorway') | (segment.infra_type == 'motorway_link'):
return standard_max_flow*4
elif (segment.infra_type == 'primary') | (segment.infra_type == 'primary_link'):
return standard_max_flow*2
elif (segment.infra_type == 'secondary') | (segment.infra_type == 'secondary_link'):
return standard_max_flow*1
elif (segment.infra_type == 'tertiary') | (segment.infra_type == 'tertiary_link'):
return standard_max_flow*0.5
else:
return standard_max_flow*0.5
def gc_function(segment):
"""[summary]
Args:
segment ([type]): [description]
Returns:
[type]: [description]
"""
# GC = α ∗ WaitT + β ∗ TrvlT + μ ∗ Trate + γ ∗ stddev
Wait_time = 0
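# in the expressions below the weights are alpha=0.57, beta=0.49, mu=1.0 and gamma=0.44
# with stddev fixed to 1; only Trate varies with the road class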
if segment.infra_type in ['primary','primary_link']:
Trate = 0.5
return 0.57*Wait_time+0.49*segment['time']+1*Trate+0.44*1
elif segment.infra_type in ['secondary','secondary_link']:
Trate = 1
return 0.57*Wait_time+0.49*segment['time']+1*Trate+0.44*1
elif segment.infra_type in ['tertiary','tertiary_link']:
Trate = 1.5
return 0.57*Wait_time+0.49*segment['time']+1*Trate+0.44*1
else:
Trate = 2
return 0.57*Wait_time+0.49*segment['time']+1*Trate+0.44*1
def update_gc_function(segment):
"""[summary]
Args:
segment ([type]): [description]
Returns:
[type]: [description]
"""
# GC = α ∗ WaitT + β ∗ TrvlT + μ ∗ Trate + γ ∗ stddev
if segment['flow'] > segment['max_flow']:
segment['wait_time'] += 1
elif segment['wait_time'] > 0:
segment['wait_time'] -= 1
else:
segment['wait_time'] = 0
if segment['infra_type'] in ['primary','primary_link']:
Trate = 0.5
return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
elif segment['infra_type'] in ['secondary','secondary_link']:
Trate = 1
return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
elif segment['infra_type'] in ['tertiary','tertiary_link']:
Trate = 1.5
return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
else:
Trate = 2
return 0.57*segment['wait_time']+0.49*segment['time']+1*Trate+0.44*1
def run_flow_analysis(country,transport_network,gdf_admin,OD_dict,notebook=False):
"""[summary]
Args:
transport_network ([type]): [description]
gdf_admin ([type]): [description]
Returns:
[type]: [description]
"""
plt.rcParams['figure.figsize'] = [5, 5]
gdf_roads = prepare_network_routing(transport_network)
sg,gdf_in = create_graph(gdf_roads)
nearest_node = nearest_network_node_list(gdf_admin,transport_network.nodes,sg)
dest_nodes = [sg.vs['name'].index(nearest_node[x]) for x in list(nearest_node.keys())]
# this is where the iterations goes
iterator = 0
optimal = False
max_iter = 100
save_fits = []
if not notebook:
plt.ion() ## Note this correction
# run flow optimization model
while optimal == False:
#update cost function per segment, dependent on flows from previous iteration.
sg.es['GC'] = [update_gc_function(segment) for segment in list(sg.es)]
sg.es['flow'] = 0
#(re-)assess shortest paths between all regions
for admin_orig in (list(gdf_admin.name)):
paths = sg.get_shortest_paths(sg.vs[sg.vs['name'].index(nearest_node[admin_orig])],dest_nodes,weights='GC',output="epath")
for path,admin_dest in zip(paths,list(gdf_admin.name)):
flow_value = OD_dict[(admin_orig,admin_dest)]
sg.es[path]['flow'] = [x + flow_value for x in sg.es[path]['flow']]
fitting_edges = (sum([x<y for x,y in zip(sg.es['flow'],sg.es['max_flow'])])/len(sg.es))
save_fits.append(fitting_edges)
# if at least 99% of roads are below max flow, we say its good enough
if (sum([x<y for x,y in zip(sg.es['flow'],sg.es['max_flow'])])/len(sg.es)) > 0.99:
optimal = True
iterator += 1
# when running the code in a notebook, the figure updates instead of a new figure each iteration
if notebook:
pl.plot(save_fits)
display.display(pl.gcf())
display.clear_output(wait=True)
else:
plt.plot(save_fits)
plt.xlabel('# iteration')
plt.ylabel('Share of edges below maximum flow')
plt.show()
plt.pause(0.0001) #Note this correction
if iterator == max_iter:
break
# save output
plt.savefig(os.path.join(code_path,'..','..','figures','{}_flow_modelling.png'.format(country)))
gdf_in['flow'] = pd.DataFrame(sg.es['flow'],columns=['flow'],index=sg.es['id'])
from collections import defaultdict
from datetime import datetime
from itertools import product
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
array,
concat,
merge,
)
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
import pandas.core.common as com
from pandas.core.sorting import (
decons_group_index,
get_group_index,
is_int64_overflow_possible,
lexsort_indexer,
nargsort,
)
class TestSorting:
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame(
{
"A": A,
"B": B,
"C": A,
"D": B,
"E": A,
"F": B,
"G": A,
"H": B,
"values": np.random.randn(2500),
}
)
lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"])
rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"])
left = lg.sum()["values"]
right = rg.sum()["values"]
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values))
tups = com.asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()["values"]
for k, v in expected.items():
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values})
grouped = data.groupby(["a", "b", "c", "d"])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list("abcde"))
df["jim"], df["joe"] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list("abcde"))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df["jim"], df["joe"]):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list("abcde"))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype="f8")
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=["jim", "joe"], index=mi)
return res.sort_index()
tm.assert_frame_equal(gr.mean(), aggr(np.mean))
tm.assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [np.nan] * 5 + list(range(100)) + [np.nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype="O")
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind="mergesort", ascending=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind="mergesort", ascending=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind="mergesort", ascending=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind="mergesort", ascending=False, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind="mergesort", ascending=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind="mergesort", ascending=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind="mergesort", ascending=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(
items2, kind="mergesort", ascending=False, na_position="first"
)
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge:
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G1"])
df2 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G2"])
# it works!
result = merge(df1, df2, how="outer")
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG"))
left["left"] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ["right"]
right.index = np.arange(len(right))
right["right"] *= -1
out = merge(left, right, how="outer")
assert len(out) == len(left)
tm.assert_series_equal(out["left"], -out["right"], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
tm.assert_series_equal(out["left"], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ["left", "right", "outer", "inner"]:
tm.assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how="left", sort=False)
tm.assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how="left", sort=False)
tm.assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(
np.random.randint(low, high, (n, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(
np.random.randint(low, high, (n // 2, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left["left"] = np.random.randn(len(left))
right["right"] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list("ABCDEFG")).iterrows():
ldict[idx].append(row["left"])
for idx, row in right.set_index(list("ABCDEFG")).iterrows():
rdict[idx].append(row["right"])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(
k
+ (
lv,
rv,
)
)
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(
k
+ (
np.nan,
rv,
)
)
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list("ABCDEFG")
tm.assert_frame_equal(
df[kcols].copy(), df[kcols].sort_values(kcols, kind="mergesort")
)
out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"])
out = align(out)
jmask = {
"left": out["left"].notna(),
"right": out["right"].notna(),
"inner": out["left"].notna() & out["right"].notna(),
"outer": np.ones(len(out), dtype="bool"),
}
for how in ["left", "right", "outer", "inner"]:
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == "outer"
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
tm.assert_frame_equal(
frame, align(res), check_dtype=how not in ("right", "outer")
)
def test_decons():
def testit(codes_list, shape):
group_index = get_group_index(codes_list, shape, sort=True, xnull=True)
codes_list2 = decons_group_index(group_index, shape)
for a, b in zip(codes_list, codes_list2):
tm.assert_numpy_array_equal(a, b)
shape = (4, 5, 6)
codes_list = [
np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64),
]
testit(codes_list, shape)
shape = (10000, 10000)
codes_list = [
np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5),
]
testit(codes_list, shape)
class TestSafeSort:
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype="object")
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("verify", [True, False])
def test_codes(self, verify):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
codes = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_codes = safe_sort(values, codes, verify=verify)
expected_codes = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
import pandas as pd
from whylogs import get_or_create_session
def log_session(dataset_name, session_data):
session = get_or_create_session()
df = pd.DataFrame(session_data)
import time
import locale
import pandas as pd
import re
import configparser
from datetime import datetime, timedelta
from log import log, log_to_file, get_file_log
from queries.tournament_queries import find_tournament_by_id, q_update_tournament, q_create_tournament
from utils import get_chrome_driver, get_dataframe_json
config = configparser.ConfigParser()
config.read("config.ini")
TOURNAMENT_LOGS = get_file_log("tournament_updates")
def search_all_tournaments_atptour():
tournaments_atptour = None
driver = get_chrome_driver()
driver.get("https://www.atptour.com/en/tournaments")
time.sleep(3)
try:
atp_names = []
atp_formatted_names = []
atp_ids = []
elements = driver.find_elements_by_xpath("//tr[@class='tourney-result']/td[2]/a")
for elem in elements:
try:
url = elem.get_attribute("href")
url_regex = re.search("/tournaments/(.*)/(.*)/overview$", url)
atp_formatted_name = url_regex.group(1)
atp_id = int(url_regex.group(2))
atp_name = elem.text
atp_formatted_names.append(atp_formatted_name)
atp_ids.append(atp_id)
atp_names.append(atp_name)
except Exception as ex:
atp_formatted_names.append(None)
atp_ids.append(None)
atp_names.append(None)
msg = "atp tournaments retrieval error, tournament '{0}'".format(elem.text)
log_to_file(msg, TOURNAMENT_LOGS)
log("tournaments", msg, type(ex).__name__)
cities = []
countries = []
elements = driver.find_elements_by_xpath("//tr[@class='tourney-result']/td[2]/span[1]")
for elem in elements:
location = elem.text
try:
matched_location = location.split(", ")
city = matched_location[0]
country = matched_location[-1]
cities.append(city)
countries.append(country)
except Exception as ex:
cities.append(None)
countries.append(None)
msg = "atp tournaments retrieval error, location '{0}'".format(location)
log_to_file(msg, TOURNAMENT_LOGS)
log("tournaments", msg, type(ex).__name__)
start_dates = []
end_dates = []
elements = driver.find_elements_by_xpath("//tr[@class='tourney-result']/td[2]/span[2]")
for elem in elements:
date_elem = elem.text
try:
date_regex = re.search("^(.*) - (.*)$", date_elem)
start_date_str = date_regex.group(1)
start_date = datetime.strptime(start_date_str, '%Y.%m.%d')
end_date_str = date_regex.group(2)
end_date = datetime.strptime(end_date_str, '%Y.%m.%d')
end_date += timedelta(days=1)
start_dates.append(start_date)
end_dates.append(end_date)
except Exception as ex:
start_dates.append(None)
end_dates.append(None)
#print(type(ex).__name__)
#print("atp tournaments retrieval error, date_elem: '{0}'".format(date_elem))
tournaments_atptour = pd.DataFrame({"atp_id": atp_ids, "atp_name": atp_names,
"atp_formatted_name": atp_formatted_names, "city": cities,
"country": countries, "start_date": start_dates, "end_date": end_dates})
except Exception as ex:
msg = "Tournament header retrieval error"
log_to_file(msg, TOURNAMENT_LOGS)
log("tournaments", msg, type(ex).__name__)
driver.quit()
return tournaments_atptour
def get_tournament_name(flash_name):
""" Get tournament name from flashscore tournament name. Some tournaments name are between brackets
ex: 'Melbourne (Great Ocean Road Open)' -> 'Great Ocean Road Open' """
name_regex = re.search(r"\((.*)\)", flash_name)
if name_regex:
return name_regex.group(1)
else:
return flash_name
def search_tournament_atptour(tournament, date_of_matches):
flash_id = tournament["flash_id"]
tournaments_atptour = search_all_tournaments_atptour()
# Tournament already exists - Checking if it has kept same references on atptour
if "atp_id" in tournament.index and "atp_formatted_name" in tournament.index:
atp_id = tournament["atp_id"]
atp_formatted_name = tournament["atp_formatted_name"]
tour_matched = tournaments_atptour[(tournaments_atptour["atp_id"] == atp_id) & (
tournaments_atptour["atp_formatted_name"] == atp_formatted_name)]
# Tournament has kept same references
if len(tour_matched.index) == 1:
return tournament
# Tournament has new references (changed atp_id)
tour_matched = tournaments_atptour[tournaments_atptour["atp_formatted_name"] == atp_formatted_name]
if len(tour_matched.index) == 1:
# New tournament kept same formatted_name but new atp_id
new_atp_id = tour_matched.iloc[0]["atp_id"]
log_to_file("Tournament '{0}' changed atp_id from '{1}' to '{2}'".format(flash_id, atp_id, new_atp_id),
TOURNAMENT_LOGS)
tournament["atp_id"] = new_atp_id
return tournament
# Tournament has new references (changed atp_id and atp_formatted_name)
tournament_name = get_tournament_name(tournament["flash_name"])
tour_matched = tournaments_atptour[tournaments_atptour["atp_name"] == tournament_name]
if len(tour_matched.index) == 1:
# New tournament kept same formatted_name but new atp_id
new_atp_id = tour_matched.iloc[0]["atp_id"]
new_formatted_name = tour_matched.iloc[0]["atp_formatted_name"]
log_to_file("Tournament '{0}' changed atp_id from '{1}' to '{2}'".format(flash_id, atp_id, new_atp_id),
TOURNAMENT_LOGS)
log_to_file("Tournament '{0}' changed atp_formatted_name from '{1}' to '{2}'"
.format(flash_id, atp_formatted_name, new_formatted_name), TOURNAMENT_LOGS)
tournament["atp_id"] = new_atp_id
tournament["atp_formatted_name"] = new_formatted_name
return tournament
# Tournament new references not found
else:
msg = "Tournament '{0}' not found, atp_id: '{1}' and atp_formatted_name: '{2}'"\
.format(flash_id, atp_id, atp_formatted_name)
log_to_file(msg, TOURNAMENT_LOGS)
log("tournament_not_found", msg)
return None
# New tournament
else:
tournament_name = get_tournament_name(tournament["flash_name"])
country = tournament["country"]
tour_matched = tournaments_atptour[tournaments_atptour["atp_name"] == tournament_name]
if len(tour_matched.index) != 1:
# Tournament not found by name. Try to find tournament by start date, end date and country
tour_matched = tournaments_atptour[(tournaments_atptour["start_date"] <= pd.Timestamp(date_of_matches))
& (tournaments_atptour["end_date"] >= | pd.Timestamp(date_of_matches) | pandas.Timestamp |
import re, json, warnings, pickle, gensim
import pandas as pd
import numpy as np
# Primary visualizations
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import seaborn as sns
import plotly.express as px
# PCA visualization
from scipy.spatial.distance import cosine
from sklearn.metrics import pairwise
from sklearn.manifold import MDS, TSNE
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
# Import (Jupyter) Dash -- App Functionality
import dash, dash_table
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_html_components as html
from jupyter_dash import JupyterDash
# Ignore simple warnings.
warnings.simplefilter('ignore', DeprecationWarning)
# Declare directory location to shorten filepaths later.
abs_dir = "/Users/quinn.wi/Documents/"
# Load model.
model = gensim.models.KeyedVectors.load_word2vec_format(abs_dir + 'Data/Output/WordVectors/jqa_w2v.txt')
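# the loaded KeyedVectors can be queried directly, e.g. model.most_similar('liberty', topn=10)
# ('liberty' is only an illustrative token; any word in the model vocabulary works)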
# Load pca + tsne coordinates.
tsne_data = pd.read_csv(abs_dir + '/Data/Output/WordVectors/jqa_w2v_tsne-coordinates.csv', sep = ',')
"""
Class Features
Name: drv_dataset_hmc_io_dynamic_forcing
Author(s): <NAME> (<EMAIL>)
Date: '20200401'
Version: '3.0.0'
"""
#######################################################################################
# Library
import logging
import warnings
import os
import re
import datetime
import numpy as np
import pandas as pd
import xarray as xr
from copy import deepcopy
from hmc.algorithm.io.lib_data_io_generic import swap_darray_dims_xy, create_darray_3d, create_darray_2d, \
write_dset, create_dset
from hmc.algorithm.io.lib_data_zip_gzip import zip_filename
from hmc.algorithm.utils.lib_utils_analysis import compute_domain_mean, \
compute_catchment_mean_serial, compute_catchment_mean_parallel_sync, compute_catchment_mean_parallel_async
from hmc.algorithm.utils.lib_utils_system import split_path, create_folder, copy_file
from hmc.algorithm.utils.lib_utils_string import fill_tags2string
from hmc.algorithm.utils.lib_utils_list import flat_list
from hmc.algorithm.utils.lib_utils_zip import add_zip_extension
from hmc.algorithm.default.lib_default_variables import variable_default_fields as dset_default_base
from hmc.algorithm.default.lib_default_args import logger_name, time_format_algorithm, time_format_datasets
from hmc.driver.dataset.drv_dataset_hmc_io_type import DSetReader, DSetComposer
# Log
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Class to configure datasets
class DSetManager:
# -------------------------------------------------------------------------------------
# Method to initialize class
def __init__(self, dset,
terrain_values=None, terrain_geo_x=None, terrain_geo_y=None, terrain_transform=None, terrain_bbox=None,
dset_list_format=None,
dset_list_type=None,
dset_list_group=None,
template_time=None, template_analysis_def=None,
model_tag='hmc', datasets_tag='datasets',
coord_name_geo_x='Longitude', coord_name_geo_y='Latitude', coord_name_time='time',
dim_name_geo_x='west_east', dim_name_geo_y='south_north', dim_name_time='time',
dset_write_engine='netcdf4', dset_write_compression_level=9, dset_write_format='NETCDF4_CLASSIC',
file_compression_mode=False, file_compression_ext='.gz',
**kwargs):
if dset_list_format is None:
dset_list_format = ['Gridded', 'Point', 'TimeSeries']
if dset_list_type is None:
dset_list_type = ['OBS', 'FOR']
if dset_list_group is None:
dset_list_group = ['OBS', 'FOR']
self.dset = dset
self.dset_list_format = dset_list_format
self.dset_list_type = dset_list_type
self.dset_list_group = dset_list_group
self.terrain_values = terrain_values
self.terrain_geo_x = terrain_geo_x
self.terrain_geo_y = terrain_geo_y
self.terrain_tranform = terrain_transform
self.terrain_bbox = terrain_bbox
self.da_terrain = create_darray_2d(self.terrain_values, self.terrain_geo_x, self.terrain_geo_y,
coord_name_x=coord_name_geo_x, coord_name_y=coord_name_geo_y,
dim_name_x=dim_name_geo_x, dim_name_y=dim_name_geo_y,
dims_order=[dim_name_geo_y, dim_name_geo_x])
self.model_tag = model_tag
self.datasets_tag = datasets_tag
self.coord_name_time = coord_name_time
self.coord_name_geo_x = coord_name_geo_x
self.coord_name_geo_y = coord_name_geo_y
self.dim_name_time = dim_name_time
self.dim_name_geo_x = dim_name_geo_x
self.dim_name_geo_y = dim_name_geo_y
self.file_name_tag = 'file_name'
self.folder_name_tag = 'folder_name'
self.var_period_tag = 'var_period'
dset_obj = {}
dset_fx = {}
dset_var_dict = {}
dset_vars_list = []
for dset_format in dset_list_format:
if dset_format in self.dset:
dset_tmp = self.dset[dset_format]
dset_obj[dset_format] = {}
dset_obj[dset_format][model_tag] = {}
dset_obj[dset_format][datasets_tag] = {}
dset_fx[dset_format] = {}
dset_fx[dset_format][datasets_tag] = {}
dset_var_dict[dset_format] = {}
dset_var_dict[dset_format][model_tag] = {}
file_name = dset_tmp['hmc_file_name']
file_folder = dset_tmp['hmc_file_folder']
file_format = dset_tmp['hmc_file_format']
file_frequency = dset_tmp['hmc_file_frequency']
file_vars = dset_tmp['hmc_file_variable']
dset_obj[dset_format][model_tag] = {}
dset_obj[dset_format][model_tag][self.file_name_tag] = file_name
dset_obj[dset_format][model_tag]['folder_name'] = file_folder
dset_obj[dset_format][model_tag]['frequency'] = file_frequency
dset_obj[dset_format][model_tag]['format'] = file_format
for dset_type in dset_list_type:
dset_obj[dset_format][datasets_tag][dset_type] = {}
dset_fx[dset_format][datasets_tag][dset_type] = {}
if file_vars[dset_type].__len__() > 0:
var_frequency = file_vars[dset_type]['var_frequency']
var_rounding = file_vars[dset_type]['var_rounding']
var_operation = file_vars[dset_type]['var_operation']
var_period = file_vars[dset_type]['var_period']
var_list = file_vars[dset_type]['var_list']
dset_fx_list = []
for var_key, var_value in var_list.items():
dset_obj[dset_format][datasets_tag][dset_type][var_key] = {}
dset_obj[dset_format][datasets_tag][dset_type][var_key][self.file_name_tag] = \
var_value['var_file_name']
dset_obj[dset_format][datasets_tag][dset_type][var_key][self.folder_name_tag] = \
var_value['var_file_folder']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_dset'] = \
var_value['var_file_dset']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_format'] = \
var_value['var_file_format']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_limits'] = \
var_value['var_file_limits']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_units'] = \
var_value['var_file_units']
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_frequency'] = var_frequency
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_rounding'] = var_rounding
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_operation'] = var_operation
dset_obj[dset_format][datasets_tag][dset_type][var_key]['var_period'] = var_period
if not dset_var_dict[dset_format][model_tag]:
dset_var_dict[dset_format][model_tag] = [var_value['var_file_dset']]
else:
value_list_tmp = dset_var_dict[dset_format][model_tag]
value_list_tmp.append(var_value['var_file_dset'])
idx_list_tmp = sorted([value_list_tmp.index(elem) for elem in set(value_list_tmp)])
value_list_filter = [value_list_tmp[idx_tmp] for idx_tmp in idx_list_tmp]
dset_var_dict[dset_format][model_tag] = value_list_filter
dset_vars_list.append(var_key)
dset_fx_list.append(var_operation)
for var_fx_step in dset_fx_list:
for var_fx_key_step, var_fx_flag_step in var_fx_step.items():
if var_fx_key_step not in list(dset_fx[dset_format][datasets_tag][dset_type].keys()):
dset_fx[dset_format][datasets_tag][dset_type][var_fx_key_step] = var_fx_flag_step
else:
var_fx_flag_tmp = dset_fx[dset_format][datasets_tag][dset_type][var_fx_key_step]
if var_fx_flag_tmp != var_fx_flag_step:
log_stream.error(' ===> Variable(s) operation is defined in two different modes!')
raise RuntimeError('Different operations are not allowed for the same group')
else:
dset_obj[dset_format][datasets_tag][dset_type] = None
dset_fx[dset_format][datasets_tag][dset_type] = None
self.dset_obj = dset_obj
self.dset_fx = dset_fx
self.dset_vars = list(set(dset_vars_list))
self.dset_lut = dset_var_dict
self.template_time = template_time
self.var_interp = 'nearest'
self.dset_write_engine = dset_write_engine
self.dset_write_compression_level = dset_write_compression_level
self.dset_write_format = dset_write_format
self.file_compression_mode = file_compression_mode
self.file_compression_ext = file_compression_ext
self.terrain_geo_x_llcorner = self.terrain_bbox[0]
self.terrain_geo_y_llcorner = self.terrain_bbox[1]
self.terrain_geo_cellsize = self.terrain_transform[0]
self.file_attributes_dict = {'ncols': self.da_terrain.shape[1],
'nrows': self.da_terrain.shape[0],
'nodata_value': -9999.0,
'xllcorner': self.terrain_geo_x_llcorner,
'yllcorner': self.terrain_geo_y_llcorner,
'cellsize': self.terrain_geo_cellsize}
self.column_sep = ';'
self.list_sep = ':'
self.template_analysis_def = template_analysis_def
if self.template_analysis_def is not None:
self.list_variable_selected = ['Rain', 'AirTemperature']
self.tag_variable_fields = '{var_name}:hmc_forcing_datasets:{domain_name}'
self.flag_analysis_ts_domain = True
if 'analysis_catchment' in list(self.template_analysis_def.keys()):
self.flag_analysis_ts_catchment = self.template_analysis_def['analysis_catchment']
else:
self.flag_analysis_ts_catchment = False
if 'analysis_mp' in list(self.template_analysis_def.keys()):
self.flag_analysis_ts_catchment_mode = self.template_analysis_def['analysis_mp']
else:
self.flag_analysis_ts_catchment_mode = False
if 'analysis_cpu' in list(self.template_analysis_def.keys()):
self.flag_analysis_ts_catchment_cpu = self.template_analysis_def['analysis_cpu']
else:
self.flag_analysis_ts_catchment_cpu = 1
else:
self.list_variable_selected = ['Rain', 'AirTemperature']
self.tag_variable_fields = '{var_name}:hmc_forcing_datasets:{domain_name}'
self.flag_analysis_ts_domain = True
self.flag_analysis_ts_catchment = False
self.flag_analysis_ts_catchment_mode = False
self.flag_analysis_ts_catchment_cpu = 1
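# Method to validate datasets flags against the expected keys and values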
@staticmethod
def validate_flag(data_name, data_flag, flag_key_expected=None, flag_values_expected=None):
if flag_values_expected is None:
flag_values_expected = [None, True, False]
if flag_key_expected is None:
flag_key_expected = ['merge', 'split', 'dump', 'copy', 'analyze']
if data_flag is not None:
for flag_key, flag_value in data_flag.items():
if flag_key not in flag_key_expected:
log_stream.error(' ===> Datasets flag key "' + flag_key + '" is not allowed.')
raise KeyError('Flag key is not in the list of authorized flag keys')
if flag_value not in flag_values_expected:
log_stream.error(' ===> Datasets flag value "' + str(flag_value) + '" is not allowed.')
raise KeyError('Flag value is not in the list of authorized flag values')
if 'copy' in list(data_flag.keys()) and 'dump' in list(data_flag.keys()):
if data_flag['copy'] and data_flag['dump']:
log_stream.error(' ===> Flags "dump" and "copy" cannot be concurrently selected.')
raise RuntimeError('Flags have to be different using the allowed values' + str(flag_values_expected))
# if 'merge' in list(data_flag.keys()) and 'split' in list(data_flag.keys()):
# if data_flag['merge'] and data_flag['split']:
# log_stream.error(' ===> Flags "merge" and "split" cannot be concurrently selected.')
# raise RuntimeError('Flags have to be different using the allowed values' + str(flag_values_expected))
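# Method to rename a template filename using the timestamp parsed from a reference filename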
@staticmethod
def rename_filename(file_path_tmpl, file_path_ref):
folder_name_tmpl, file_name_tmpl = os.path.split(file_path_tmpl)
folder_name_ref, file_name_ref = os.path.split(file_path_ref)
time_match = re.search(r'\d{4}\d{2}\d{2}\d{2}\d{2}', file_name_ref)
time_stamp = pd.Timestamp(datetime.datetime.strptime(time_match.group(), time_format_datasets))
file_name_def = file_name_tmpl.format(dset_datetime_hmc=time_stamp.strftime(time_format_datasets))
file_path_def = os.path.join(folder_name_ref, file_name_def)
return file_path_def
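# Example for rename_filename (illustrative, hypothetical paths): with file_path_tmpl ending in
# 'hmc.output_{dset_datetime_hmc}.nc' and file_path_ref ending in 'hmc.output_202101011300.nc',
# the 12-digit timestamp of the reference file is re-used to fill the template name.
# Method to copy source datasets file(s) to the destination file(s) expected by the model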
def copy_data(self, dset_model_dyn, dset_source_dyn, columns_excluded=None, vars_selected=None):
# Starting info
log_stream.info(' -------> Copy data ... ')
if columns_excluded is None:
columns_excluded = ['index', 'File_Type']
var_model_list = list(dset_model_dyn.columns)
var_source_list = list(dset_source_dyn.columns)
var_model_filter = [var for var in var_model_list if var not in columns_excluded]
var_source_filter = [var for var in var_source_list if var not in columns_excluded]
file_dest_list = None
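# Collect the destination filename(s) from the model dataframe (values may be ';'-joined lists)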
for var_model_step in var_model_filter:
file_model_step = list(dset_model_dyn[var_model_step].values)
for file_name_raw in file_model_step:
if isinstance(file_name_raw, str):
if self.column_sep in file_name_raw:
file_name_model_step = file_name_raw.split(self.column_sep)
else:
file_name_model_step = file_name_raw
if not isinstance(file_name_model_step, list):
file_name_model_step = [file_name_model_step]
if file_dest_list is None:
file_dest_list = [[] for i in range(file_name_model_step.__len__())]
for list_id, file_dest_step in enumerate(file_name_model_step):
if isinstance(file_dest_step, str):
file_dest_list[list_id].append(file_dest_step)
else:
log_stream.warning(' ===> Expected filename is not in string format!')
file_source_list = None
list_id_defined = None
for list_id, var_source_step in enumerate(var_source_filter):
log_stream.info(' --------> Variable ' + var_source_step + ' ... ')
file_source_step = list(dset_source_dyn[var_source_step].values)
file_source_tmp = []
for file_name_raw in file_source_step:
if isinstance(file_name_raw, str):
if self.column_sep in file_name_raw:
file_name_source_step = file_name_raw.split(self.column_sep)
else:
file_name_source_step = file_name_raw
if not isinstance(file_name_source_step, list):
file_name_source_step = [file_name_source_step]
for file_source_step in file_name_source_step:
if isinstance(file_source_step, str):
file_source_tmp.append(file_source_step)
else:
log_stream.warning(' ===> Expected filename is not in string format!')
if file_source_list is None:
file_source_list = []
if not file_source_tmp:
file_source_list.append([]) # condition for empty datasets
# file_source_list = None
else:
file_source_list.append(file_source_tmp)
if list_id_defined is None:
list_id_defined = 0
else:
list_id_defined += 1
if (file_dest_list is not None) and (file_source_list is not None):
if file_dest_list.__len__() > file_source_list.__len__():
file_dest_select = flat_list(file_dest_list)
file_source_select = file_source_list[list_id_defined]
elif file_dest_list.__len__() == file_source_list.__len__():
file_dest_select = file_dest_list[list_id]
file_source_select = file_source_list[list_id]
elif file_dest_list.__len__() < file_source_list.__len__():
file_dest_select = flat_list(file_dest_list)
file_source_select = file_source_list[list_id]
else:
log_stream.error(' ===> Copy failed for unexpected number of destination or source filenames')
raise IOError('Source and destination filenames have to be equal')
if file_dest_select and file_source_select:
warning_message_print = True
for file_path_dest_step, file_path_source_step in zip(file_dest_select, file_source_select):
folder_name_source_step, file_name_source_step = split_path(file_path_source_step)
folder_name_dest_step, file_name_dest_step = split_path(file_path_dest_step)
if os.path.exists(file_path_source_step):
if var_source_step in vars_selected:
if not os.path.exists(file_path_dest_step):
create_folder(folder_name_dest_step)
copy_file(file_path_source_step, file_path_dest_step)
elif var_source_step not in vars_selected:
if warning_message_print:
log_stream.warning(' ===> Variable: ' + var_source_step +
' is not expected for this datasets')
warning_message_print = False
else:
log_stream.warning(' ===> Copy file: ' + file_name_source_step +
' FAILED. File does not exist!')
log_stream.info(' --------> Variable ' + var_source_step + ' ... DONE')
else:
log_stream.warning(' ===> Copy file: ... FAILED. Datasets are undefined')
log_stream.info(' --------> Variable ' + var_source_step + ' ... SKIPPED')
else:
log_stream.warning(' ===> Copy file: ... FAILED. All files do not exist')
log_stream.info(' --------> Variable ' + var_source_step + ' ... SKIPPED')
# Ending info
log_stream.info(' -------> Copy data ... DONE')
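# Method to freeze (copy) the values of a dataset/dataframe into the expected time-indexed dataframe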
def freeze_data(self, dset_expected, dset_def, dset_key_delimiter=':', dset_key_excluded=None):
# Starting info
log_stream.info(' -------> Freeze data ... ')
if dset_key_excluded is None:
dset_key_excluded = ['index', 'File_Type', 'terrain']
if dset_def is not None:
dset_vars_expected = self.dset_vars
dset_check = False
dframe_check = False
if isinstance(dset_def, xr.Dataset):
dset_vars_def = list(dset_def.data_vars)
dset_check = True
elif isinstance(dset_def, pd.DataFrame):
dset_vars_def = list(dset_def.columns)
dframe_check = True
else:
log_stream.error(' ===> Freeze data type is not implemented')
raise NotImplementedError('Data type is unknown for freezing data')
dset_vars_tmp = deepcopy(dset_vars_def)
for dset_var_step in dset_vars_tmp:
if dset_var_step in dset_key_excluded:
dset_vars_def.remove(dset_var_step)
for dset_var_step in dset_vars_def:
if dset_key_delimiter in dset_var_step:
dset_var_root = dset_var_step.split(dset_key_delimiter)[0]
else:
dset_var_root = dset_var_step
if dset_vars_expected[0] == 'ALL':
if dset_check:
if dset_var_step not in ['terrain', 'mask']:
values_nan = np.zeros([dset_expected.index.__len__()])
values_nan[:] = np.nan
dset_expected[dset_var_step] = values_nan
if 'time' in list(dset_def[dset_var_step].dims):
time_array = dset_def[dset_var_step]['time'].values
else:
if 'time' in list(dset_def.dims):
time_array = dset_def['time'].values
else:
log_stream.error(' ===> Freeze time array is not defined for variables')
raise NotImplementedError('Time array is unknown for freezing data')
time_stamp_list = []
for time_step in time_array:
time_stamp = pd.to_datetime(time_step, format='%Y-%m-%d_%H:%M:%S')
time_stamp_list.append(time_stamp)
dset_idx = pd.DatetimeIndex(time_stamp_list)
dset_values = dset_def[dset_var_step].values
dset_expected.loc[dset_idx, dset_var_step] = dset_values
elif dframe_check:
dset_idx = dset_def[dset_var_step].index
dset_values = dset_def[dset_var_step].values
dset_expected.loc[dset_idx, dset_var_step] = dset_values
else:
log_stream.error(' ===> Freeze data type for ALL variables is not implemented')
raise NotImplementedError('Data type is unknown for freezing data')
elif dset_var_root in dset_vars_expected:
if dset_var_step not in list(dset_expected.columns):
values_nan = np.zeros([dset_expected.index.__len__()])
values_nan[:] = np.nan
dset_expected[dset_var_step] = values_nan
if dset_check:
time_array = dset_def[dset_var_step].time.values
time_stamp_list = []
for time_step in time_array:
time_stamp = pd.to_datetime(time_step, format='%Y-%m-%d_%H:%M:%S')
time_stamp_list.append(time_stamp)
dset_idx = pd.DatetimeIndex(time_stamp_list)
dset_values = dset_def[dset_var_step].values
elif dframe_check:
dset_idx = dset_def[dset_var_step].index
dset_values = dset_def[dset_var_step].values
else:
log_stream.error(' ===> Freeze data type for variable is not implemented')
raise NotImplementedError('Data type is unknown for freezing data')
dset_expected.loc[dset_idx, dset_var_step] = dset_values
else:
pass
# Ending info
log_stream.info(' -------> Freeze data ... DONE')
return dset_expected
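# Method to dump model datasets to file(s) and, if compression is active, zip them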
def dump_data(self, dset_model, dset_time, dset_source):
# Starting info
log_stream.info(' -------> Dump data ... ')
dump_status_list = []
file_path_list_unzip = []
file_path_list_zip = []
for time_step in dset_time:
log_stream.info(' --------> TimeStep ' + str(time_step) + ' ... ')
if time_step in list(dset_source[self.dim_name_time].values):
# Check dataset
log_stream.info(' ---------> Verify datasets structure ... ')
dset_step = dset_source.sel(time=time_step)
dset_var_list = list(dset_step.data_vars)
for dset_var_name in dset_var_list:
da_array = dset_step[dset_var_name]
log_stream.info(' ----------> Variable ' + dset_var_name + ' ... ')
if np.isnan(da_array).all().values:
log_stream.info(' ----------> Variable ' + dset_var_name + ' ... IS EMPTY. REMOVE FROM DATASET')
dset_step = dset_step.drop_vars([dset_var_name])
elif not np.isnan(da_array).all().values:
log_stream.info(' ----------> Variable ' + dset_var_name + ' ... IS NOT EMPTY. PASS')
log_stream.info(' ---------> Verify datasets structure ... DONE')
# Prepare dataset
dset_file_path_step = dset_model.loc[time_step][self.model_tag]
dset_file_folder_step, dset_file_name_step = split_path(dset_file_path_step)
create_folder(dset_file_folder_step)
if self.file_compression_mode:
if not dset_file_name_step.endswith(self.file_compression_ext):
log_stream.warning(
' ===> File expected in zipped format with ' + self.file_compression_ext + ' extension. Got '
+ dset_file_name_step + ' filename. Add extension to given filename')
dset_file_name_step = add_zip_extension(dset_file_name_step, self.file_compression_ext)
if dset_file_name_step.endswith(self.file_compression_ext):
dset_file_name_step_zip = dset_file_name_step
dset_file_name_step_unzip = os.path.splitext(dset_file_name_step)[0]
else:
dset_file_name_step_zip = dset_file_name_step
dset_file_name_step_unzip = dset_file_name_step
self.file_compression_mode = True
dset_file_path_step_zip = os.path.join(dset_file_folder_step, dset_file_name_step_zip)
dset_file_path_step_unzip = os.path.join(dset_file_folder_step, dset_file_name_step_unzip)
# Write dataset
log_stream.info(' ---------> Write datasets to filename "' + dset_file_name_step_unzip + '" ... ')
dset_attrs = self.file_attributes_dict
write_dset(dset_file_path_step_unzip,
dset_data=dset_step, dset_attrs=dset_attrs, dset_format=self.dset_write_format,
dset_compression=self.dset_write_compression_level, dset_engine=self.dset_write_engine)
log_stream.info(' ---------> Write datasets to filename "' + dset_file_name_step_unzip + '" ... DONE')
file_path_list_unzip.append(dset_file_path_step_unzip)
file_path_list_zip.append(dset_file_path_step_zip)
dump_status_list.append(True)
log_stream.info(' --------> TimeStep ' + str(time_step) + ' ... DONE')
else:
log_stream.info(' --------> TimeStep ' + str(time_step) + ' ... SKIPPED')
log_stream.warning(' ===> Dump time step ' + str(time_step) + ' is not in datasets time period')
# Ending info
log_stream.info(' -------> Dump data ... DONE')
# Starting info
log_stream.info(' -------> Zip data ... ')
file_compression_mode = self.file_compression_mode
if file_compression_mode:
for file_path_unzip, file_path_zip, dump_status in zip(file_path_list_unzip, file_path_list_zip,
dump_status_list):
if dump_status:
if os.path.exists(file_path_zip):
os.remove(file_path_zip)
if os.path.exists(file_path_unzip):
zip_filename(file_path_unzip, file_path_zip)
else:
# Ending info
log_stream.warning(' -------> Zip data ... SKIPPED. File ' + file_path_unzip + ' not available')
if os.path.exists(file_path_zip) and os.path.exists(file_path_unzip):
os.remove(file_path_unzip)
else:
# Ending info
log_stream.warning(' -------> Zip data ... SKIPPED. File ' + file_path_unzip + ' not saved')
# Ending info
log_stream.info(' -------> Zip data ... DONE')
else:
# Ending info
log_stream.info(' -------> Zip data ... SKIPPED. Zip not activated')
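# Method to organize source datasets: gridded datasets are interpolated/masked over the terrain
# reference and merged into a common dataset, point and time-series datasets are collected into a
# time-indexed dataframe; time-series analyses (domain/catchment means) are computed if activated.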
def organize_data(self, dset_time, dset_source, dset_static=None, dset_variable_selected='ALL'):
# Get variable(s)
dset_vars = dset_source[self.datasets_tag]
# Get terrain reference
da_terrain = self.da_terrain
if dset_static is not None:
if 'mask_name_list' in list(dset_static.keys()):
mask_name_obj = dset_static['mask_name_list']
else:
mask_name_obj = None
else:
mask_name_obj = None
if dset_vars is not None:
if (self.coord_name_geo_x in list(dset_source.keys())) and (self.coord_name_geo_y in list(dset_source.keys())):
if dset_variable_selected == 'ALL':
dset_variable_selected = dset_vars.variables
log_stream.info(' -------> Organize gridded datasets ... ')
if dset_variable_selected is not None:
# Get geographical information
geo_x_values = dset_source[self.coord_name_geo_x]
geo_y_values = dset_source[self.coord_name_geo_y]
# Iterate over dataset(s)
var_dset_collections = None
var_dset_out = None
for var_name_step in dset_vars:
log_stream.info(' --------> Organize ' + var_name_step + ' datasets ... ')
if var_name_step in dset_variable_selected:
# Get data array
var_da_step = dset_vars[var_name_step]
if var_da_step is not None:
# Get dataset coordinates list
dims_list = list(var_da_step.dims)
if 'time' in dims_list:
time_stamp_period = []
for time_step in var_da_step['time'].values:
timestamp_step = pd.to_datetime(time_step, format='%Y-%m-%d_%H:%M:%S')
if isinstance(timestamp_step, pd.DatetimeIndex):
timestamp_step = timestamp_step[0]
elif isinstance(timestamp_step, pd.Timestamp):
pass
else:
log_stream.error(' ===> Time type is not allowed')
raise NotImplementedError('Case not implemented yet')
timestamp_step = timestamp_step.round('H')
time_stamp_period.append(timestamp_step)
dset_time_step = pd.DatetimeIndex(time_stamp_period)
if isinstance(dset_time, pd.Timestamp):
dset_time = pd.DatetimeIndex([dset_time])
# Recompute period in case of time_start and/or time end are not the same
dset_time_step_start = dset_time_step.values[0]
dset_time_step_end = dset_time_step.values[-1]
dset_time_start = dset_time.values[0]
dset_time_end = dset_time.values[-1]
if dset_time_step_start < dset_time_start:
index_start_step_tmp = dset_time_step.get_loc(dset_time_start)
index_start_tmp = dset_time.get_loc(dset_time_start)
elif dset_time_step_start == dset_time_start:
index_start_step_tmp = 0
index_start_tmp = 0
else:
log_stream.error(' ===> Time start step is greater than time start.')
log_stream.error(' ===> Errors occurred for an unexpected condition')
raise NotImplementedError('Case not implemented yet')
if dset_time_step_end < dset_time_end:
index_end_step_tmp = dset_time_step.get_loc(dset_time_step_end) + 1
index_end_tmp = dset_time.get_loc(dset_time_step_end) + 1
elif dset_time_step_end == dset_time_end:
index_end_step_tmp = dset_time_step.get_loc(dset_time_end) + 1
index_end_tmp = dset_time.get_loc(dset_time_end) + 1
elif dset_time_step_end > dset_time_end:
index_end_step_tmp = dset_time_step.get_loc(dset_time_end) + 1
index_end_tmp = dset_time.get_loc(dset_time_end) + 1
else:
log_stream.error(' ===> Matching between time end and time end step failed')
log_stream.error(' ===> Errors occurred for an unknown reason')
raise NotImplementedError('Case not implemented yet')
if dset_time_step.shape[0] == 1:
dset_time_step = pd.DatetimeIndex([dset_time_step[0]])
#var_da_step = var_da_step
dset_time = pd.DatetimeIndex([dset_time[0]])
elif dset_time_step.shape[0] > 1:
dset_time_step = dset_time_step[index_start_step_tmp:index_end_step_tmp] # datatimeindex
var_da_step = var_da_step[:, :, index_start_step_tmp:index_end_step_tmp] # dataarray 3d
dset_time = dset_time[index_start_tmp:index_end_tmp] # datatimeindex
else:
log_stream.error(' ===> Expected time-series with length >= 1')
raise NotImplementedError('Case not implemented yet')
# Search index of longitude and latitude
if self.dim_name_geo_x in dims_list:
dim_idx_geo_x = dims_list.index(self.dim_name_geo_x)
else:
log_stream.error(' ===> Dimension X is wrongly defined.')
raise IOError('Check netcdf datasets for dims definition')
if self.dim_name_geo_y in dims_list:
dim_idx_geo_y = dims_list.index(self.dim_name_geo_y)
else:
log_stream.error(' ===> Dimension Y is wrongly defined.')
raise IOError('Check netcdf datasets for dims definition')
# Get variable, data, time and attributes of expected data
var_data_expected = np.zeros(
[var_da_step.shape[dim_idx_geo_y], var_da_step.shape[dim_idx_geo_x],
dset_time.shape[0]])
# Check datasets dimensions and in case of mismatching try to correct
if (var_data_expected.shape[0] == da_terrain.shape[1]) and (
var_data_expected.shape[1] == da_terrain.shape[0]):
var_data_expected = np.zeros([da_terrain.shape[0], da_terrain.shape[1],
dset_time.shape[0]])
log_stream.info(' --------> ' + var_name_step +
' datasets and terrain datasets have the same dimensions'
' but in a different order found by using the automatic detection')
log_stream.warning(' ===> Use terrain dimensions to try datasets analysis')
active_interp_method = True
elif var_data_expected.shape[:2] != da_terrain.shape:
var_data_expected = np.zeros([geo_x_values.shape[0], geo_y_values.shape[1],
dset_time.shape[0]])
log_stream.info(' --------> ' + var_name_step +
' datasets and terrain datasets do not have the same dimensions'
' found by using the automatic detection')
log_stream.warning(' ===> Use datasets dimensions to try datasets analysis')
active_interp_method = True
elif var_data_expected.shape[:2] == da_terrain.shape:
log_stream.info(' --------> ' + var_name_step +
' datasets and terrain datasets have the same dimensions'
' found by using the automatic detection')
active_interp_method = False
else:
log_stream.error(' --------> ' + var_name_step +
' datasets and terrain datasets give an error'
' by using the automatic detection')
raise IOError('Check your static and forcing datasets')
var_data_expected[:, :, :] = np.nan
# Get variable, data, time and attributes of expected data
var_da_expected = create_darray_3d(
var_data_expected, dset_time, geo_x_values, geo_y_values,
coord_name_time=self.coord_name_time,
coord_name_x=self.coord_name_geo_x, coord_name_y=self.coord_name_geo_y,
dim_name_time=self.dim_name_time,
dim_name_x=self.dim_name_geo_x, dim_name_y=self.dim_name_geo_y,
dims_order=[self.dim_name_geo_y, self.dim_name_geo_x, self.dim_name_time])
# Swap data arrays dimensions (is needed for mismatching in data input)
var_da_step = swap_darray_dims_xy(var_da_expected, var_da_step, da_terrain)
# Combine raw and expected data arrays
if dset_time.shape[0] > 1:
var_da_combined = var_da_expected.combine_first(var_da_step) # dataarray 3d
elif dset_time.shape[0] == 1:
var_da_combined = deepcopy(var_da_step)
var_da_combined.name = None
else:
log_stream.error(' ===> Expected datasets with length >= 1')
raise NotImplementedError('Case not implemented yet')
# Select only selected time-steps
if dset_time.shape[0] > 1:
dset_time_intersect = dset_time_step.intersection(dset_time)
if dset_time_intersect.shape == dset_time.shape:
var_da_selected = var_da_combined.sel(time=dset_time)
else:
log_stream.error(
' ===> All/some selected time-steps are not available in source data.')
raise IOError('Datasets are not on the same period or sub-period.')
elif dset_time.shape[0] == 1:
var_da_selected = deepcopy(var_da_combined)
else:
log_stream.error(' ===> Expected datasets with length >= 1')
raise NotImplementedError('Case not implemented yet')
# Perform interpolation and masking of datasets
if active_interp_method:
# Interpolation info start
log_stream.info(' ---------> Interpolate ' + var_name_step +
' datasets ... ')
# Configure data array with longitude/latitude coordinates
var_da_selected_tmp = create_darray_3d(
var_da_selected.values, dset_time, geo_x_values, geo_y_values,
coord_name_time=self.coord_name_time,
coord_name_x='Longitude', coord_name_y='Latitude',
dim_name_time=self.dim_name_time,
dim_name_x='Longitude', dim_name_y='Latitude',
dims_order=['Latitude', 'Longitude', self.dim_name_time])
if self.var_interp == 'nearest':
# Interpolation method info start
log_stream.info(' ----------> Apply ' + self.var_interp + ' method ... ')
# Apply the interpolation method
var_da_interp_tmp = var_da_selected_tmp.interp(
Latitude=self.da_terrain['Latitude'],
Longitude=self.da_terrain['Longitude'], method='nearest')
# Interpolation method info end
log_stream.info(' ----------> Apply method ' + self.var_interp + ' ... DONE')
else:
# Ending info for undefined function
log_stream.error(' ===> Interpolation method ' +
self.var_interp + ' not available')
raise NotImplementedError('Interpolation method not implemented yet')
# Configure the data array with west_east/south_north coordinates
var_da_interp = create_darray_3d(
var_da_interp_tmp.values, dset_time,
self.da_terrain['Longitude'].values, self.da_terrain['Latitude'].values,
coord_name_time=self.coord_name_time,
coord_name_x=self.coord_name_geo_x, coord_name_y=self.coord_name_geo_y,
dim_name_time=self.dim_name_time,
dim_name_x=self.dim_name_geo_x, dim_name_y=self.dim_name_geo_y,
dims_order=[self.dim_name_geo_y, self.dim_name_geo_x,
self.dim_name_time])
# Interpolation info end
log_stream.info(' ---------> Interpolate ' + var_name_step +
' datasets ... DONE')
else:
var_da_interp = deepcopy(var_da_selected)
# Mask the data array variable over the terrain reference data array
var_da_masked = var_da_interp.where((self.da_terrain != -9999) &
(var_da_interp != -9999))
else:
if isinstance(dset_time, pd.Timestamp):
dset_time = pd.DatetimeIndex([dset_time])
# Mask the data array variable over the terrain reference data array
var_da_masked = var_da_step.where((self.da_terrain != -9999) &
(var_da_step != -9999))
if var_da_masked.ndim == 3:
list_dims = list(var_da_masked.dims)
if self.dim_name_geo_x in list_dims:
list_dims.remove(self.dim_name_geo_x)
if self.dim_name_geo_y in list_dims:
list_dims.remove(self.dim_name_geo_y)
dim_name = list_dims[0]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
var_da_masked = var_da_masked.mean(dim=dim_name)
var_da_masked = var_da_masked.expand_dims('time', axis=-1)
var_da_masked = var_da_masked.assign_coords({'time': dset_time})
elif var_da_masked.ndim > 3:
log_stream.error(' ===> Variable dimensions not allowed')
raise IOError('Case not implemented yet')
log_stream.info(' --------> Organize ' + var_name_step + ' datasets ... DONE')
else:
log_stream.info(' --------> Organize ' + var_name_step + ' datasets ... FAILED')
log_stream.warning(' ===> Variable is None. Default initialization was selected')
var_data_null = np.zeros([var_da_step.shape[0], var_da_step.shape[1], dset_time.shape[0]])
var_data_null[:, :, :] = np.nan
var_da_masked = create_darray_3d(
var_data_null, dset_time, geo_x_values, geo_y_values,
coord_name_time=self.coord_name_time,
coord_name_x=self.coord_name_geo_x, coord_name_y=self.coord_name_geo_y,
dim_name_time=self.dim_name_time,
dim_name_x=self.dim_name_geo_x, dim_name_y=self.dim_name_geo_y,
dims_order=[self.dim_name_geo_y, self.dim_name_geo_x, self.dim_name_time])
# Organize data in a common datasets
var_dset_grid_step = create_dset(var_data_time=dset_time,
var_data_name=var_name_step, var_data_values=var_da_masked,
var_data_attrs=None,
var_geo_1d=False,
file_attributes=self.file_attributes_dict,
var_geo_name='terrain', var_geo_values=self.terrain_values,
var_geo_x=self.terrain_geo_x, var_geo_y=self.terrain_geo_y,
var_geo_attrs=None)
# Organize data in merged datasets
if var_dset_out is None:
var_dset_out = var_dset_grid_step
else:
var_dset_out = var_dset_out.merge(var_dset_grid_step, join='right')
else:
log_stream.info(' --------> Organize ' + var_name_step +
' datasets ... SKIPPED. Variable is not selected for analysis.')
# Time-Series Analysis
log_stream.info(' --------> Compute time-series analysis over domain ... ')
if self.flag_analysis_ts_domain:
var_dset_ts_domain = compute_domain_mean(
var_dset_out, tag_variable_fields=self.tag_variable_fields,
template_variable_domain='DomainAverage')
if var_dset_collections is None:
var_dset_collections = var_dset_ts_domain
else:
var_dset_collections = var_dset_collections.merge(var_dset_ts_domain, join='right')
log_stream.info(' --------> Compute time-series analysis over domain ... DONE')
else:
log_stream.info(' --------> Compute time-series analysis over domain ... SKIPPED. '
'Analysis not activated')
log_stream.info(' --------> Compute time-series analysis over catchments ... ')
if self.flag_analysis_ts_catchment:
if mask_name_obj is not None:
if not self.flag_analysis_ts_catchment_mode:
var_dset_ts_catchment = compute_catchment_mean_serial(
var_dset_out, mask_name_obj,
variable_domain_fields=self.tag_variable_fields,
variable_selected_list=self.list_variable_selected)
elif self.flag_analysis_ts_catchment_mode:
# var_dset_ts_catchment = compute_catchment_mean_parallel_sync(
# var_dset_out, mask_name_obj,
# cpu_n=self.flag_analysis_ts_catchment_cpu,
# variable_domain_fields=self.tag_variable_fields,
# variable_selected_list=self.list_variable_selected)
var_dset_ts_catchment = compute_catchment_mean_parallel_async(
var_dset_out, mask_name_obj,
cpu_n=self.flag_analysis_ts_catchment_cpu,
variable_domain_fields=self.tag_variable_fields,
variable_selected_list=self.list_variable_selected)
else:
log_stream.error(' ===> Catchments analysis mode not allowed')
raise RuntimeError('Unexpected catchments analysis condition')
if var_dset_collections is None:
var_dset_collections = var_dset_ts_catchment
else:
var_dset_collections = var_dset_collections.merge(var_dset_ts_catchment, join='right')
log_stream.info(' --------> Compute time-series analysis over catchments ... DONE')
else:
log_stream.info(' --------> Compute time-series analysis over catchments ... SKIPPED.'
' Catchment mask obj not defined')
else:
log_stream.info(' --------> Compute time-series analysis over catchments ... SKIPPED.'
' Analysis not activated')
log_stream.info(' -------> Organize gridded datasets ... DONE')
else:
var_dset_out = None
var_dset_collections = None
log_stream.info(' -------> Organize gridded datasets ... SKIPPED. Empty selected variables.')
else:
log_stream.info(' -------> Organize point and time-series datasets ... ')
if dset_variable_selected is not None:
if dset_variable_selected == 'ALL':
dset_variable_selected = list(dset_vars.keys())
if isinstance(dset_time, pd.Timestamp):
dset_time = pd.DatetimeIndex([dset_time])
var_dset_collections = pd.DataFrame({self.dim_name_time: dset_time})
for dset_key, dset_group in dset_vars.items():
log_stream.info(' --------> Organize ' + dset_key + ' datasets ... ')
if dset_key in dset_variable_selected:
for dset_sub_key, dset_sub_group in dset_group.items():
for dset_sub_col in list(dset_sub_group.columns):
var_key = ':'.join([dset_key, dset_sub_key, dset_sub_col])
var_data = dset_sub_group[dset_sub_col].values
var_dset_collections.loc[:, var_key] = pd.Series(data=var_data).fillna(value=pd.NA)
log_stream.info(' --------> Organize ' + dset_key + ' datasets ... DONE')
else:
log_stream.info(' --------> Organize ' + dset_key +
' datasets ... SKIPPED. Variable is not selected for analysis.')
var_dset_collections = var_dset_collections.reset_index()
var_dset_collections = var_dset_collections.set_index(self.dim_name_time)
var_dset_out = None
log_stream.info(' -------> Organize point and time-series datasets ... DONE')
else:
var_dset_out = None
var_dset_collections = None
log_stream.info(
' -------> Organize point and time-series datasets ... SKIPPED. Empty selected variables.')
else:
log_stream.warning(' ===> Datasets is None. All variable(s) are undefined')
var_dset_out = None
var_dset_collections = None
return var_dset_out, var_dset_collections
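# Method to collect dynamic source datasets (read, rename, convert units, regrid and fill in time)
# into a common variable workspace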
def collect_data(self, dset_model_dyn, dset_source_dyn, dset_source_base,
dset_static_info=None,
columns_excluded=None, dset_time_info=None,
dset_time_start=None, dset_time_end=None, **kwargs):
if columns_excluded is None:
columns_excluded = ['index', 'File_Type']
var_args = {}
if 'plant_name_list' in kwargs:
var_args['plant_name_list'] = kwargs['plant_name_list']
if 'release_name_list' in kwargs: #add20210608
var_args['release_name_list'] = kwargs['release_name_list'] #add20210608
da_terrain = self.da_terrain
file_source_vars_tmp = list(dset_source_dyn.columns)
file_source_vars_def = [elem for elem in file_source_vars_tmp if elem not in columns_excluded]
var_frame = {}
dset_source = None
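# Iterate over the source variable(s) and read the dynamic datasets through the reader driver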
for var_name in file_source_vars_def:
log_stream.info(' -------> Collect ' + var_name + ' source datasets ... ')
if dset_source_base is not None:
if var_name in list(dset_source_base.keys()):
dset_source_var_base = dset_source_base[var_name]
dset_source_var_dyn = dset_source_dyn[var_name]
if var_name in list(dset_default_base.keys()):
dset_default_var_base = dset_default_base[var_name]
else:
dset_default_var_base = None
dset_datetime_idx = dset_source_var_dyn.index
dset_filename = dset_source_var_dyn.values
if 'format' in dset_source_base:
dset_format = dset_source_base['format']
else:
dset_format = None
if dset_static_info is not None:
if var_name == 'Discharge':
var_static_info = dset_static_info['outlet_name_list']
elif (var_name == 'DamV') or (var_name == 'DamL'):
var_static_info = dset_static_info['dam_name_list']
elif (var_name == 'IntakeQ'): #add20210607
var_static_info = dset_static_info['plant_name_list'] #add20210607
elif var_name == 'VarAnalysis':
var_static_info = None
else:
var_static_info = None
else:
var_static_info = None
driver_hmc_parser = DSetReader(dset_filename, dset_source_var_base, dset_datetime_idx,
dset_time_info, var_format=dset_format)
obj_var, da_time, geo_x, geo_y = driver_hmc_parser.read_filename_dynamic(
var_name, var_args, var_time_start=dset_time_start, var_time_end=dset_time_end,
var_static_info=var_static_info)
if obj_var is not None:
if isinstance(obj_var, xr.Dataset):
# Organize datasets name
log_stream.info(' --------> Organize ' + var_name + ' dataset name ... ')
obj_var_name_list = list(obj_var.data_vars)
if obj_var_name_list.__len__() == 1:
log_stream.info(' ---------> Variable list: ' + str(obj_var_name_list) +
' with 1 item')
obj_var_name = obj_var_name_list[0]
if obj_var_name != var_name:
obj_var = obj_var.rename_vars({obj_var_name: var_name})
log_stream.warning(' ===> Switch variable name in dataset from "' +
obj_var_name + '" to "' + var_name + '"')
else:
log_stream.info(' ---------> Variable list: ' + str(obj_var_name_list) +
' with ' + str(obj_var_name_list.__len__()) + ' items')
log_stream.info(' --------> Organize ' + var_name + ' dataset name ... DONE')
# Organize datasets units
log_stream.info(' --------> Organize ' + var_name + ' dataset units ... ')
driver_hmc_composer = DSetComposer(dset_filename, dset_source_var_base,
dset_datetime_idx, time_dst_info=dset_time_info)
obj_var = driver_hmc_composer.validate_data_units(var_name, obj_var, dset_default_var_base)
log_stream.info(' --------> Organize ' + var_name + ' dataset units ... DONE')
# Organize datasets geographical domain
log_stream.info(' --------> Organize ' + var_name + ' dataset geographical domain ... ')
if ((obj_var['west_east'].shape[0] != da_terrain['west_east'].shape[0]) or
(obj_var['south_north'].shape[0] != da_terrain['south_north'].shape[0])):
# Interpolation info start
log_stream.info(' ---------> Interpolate ' + var_name + ' datasets ... ')
# Configure data array with longitude/latitude coordinates
var_da_src = create_darray_3d(
obj_var[var_name].values, da_time, geo_x, geo_y,
coord_name_time=self.coord_name_time,
coord_name_x='Longitude', coord_name_y='Latitude',
dim_name_time=self.dim_name_time,
dim_name_x='Longitude', dim_name_y='Latitude',
dims_order=['Latitude', 'Longitude', self.dim_name_time])
if self.var_interp == 'nearest':
# Interpolation method info start
log_stream.info(' -----------> Apply ' + self.var_interp + ' method ... ')
# Apply the interpolation method
var_da_interp_tmp = var_da_src.interp(
Latitude=da_terrain['Latitude'],
Longitude=da_terrain['Longitude'], method='nearest')
# Interpolation method info end
log_stream.info(' -----------> Apply ' + self.var_interp + ' method ... DONE')
else:
# Ending info for undefined function
log_stream.error(' ===> Interpolation method ' +
self.var_interp + ' not available')
raise NotImplementedError('Interpolation method not implemented yet')
# Configure the data array with west_east/south_north coordinates
var_da_interp = create_darray_3d(
var_da_interp_tmp, da_time,
da_terrain['Longitude'].values, da_terrain['Latitude'].values,
coord_name_time=self.coord_name_time,
coord_name_x=self.coord_name_geo_x, coord_name_y=self.coord_name_geo_y,
dim_name_time=self.dim_name_time,
dim_name_x=self.dim_name_geo_x, dim_name_y=self.dim_name_geo_y,
dims_order=[self.dim_name_geo_y, self.dim_name_geo_x,
self.dim_name_time])
var_da_masked = var_da_interp.where((da_terrain != -9999))
obj_var = var_da_masked.to_dataset(name=var_name)
geo_x = da_terrain['Longitude'].values
geo_y = da_terrain['Latitude'].values
# Interpolation info end
log_stream.info(' ---------> Interpolate ' + var_name + ' datasets ... DONE')
log_stream.info(' --------> Organize ' + var_name + ' dataset geographical domain ... DONE')
log_stream.info(' --------> Organize ' + var_name + ' dataset time period ... ')
if obj_var[self.coord_name_time].shape[0] < dset_datetime_idx.shape[0]:
if var_name != 'ALL':
log_stream.info(' ---------> Fill expected datasets with dynamic values ... ')
var_x_tmp = da_terrain['Longitude'].values.shape[0]
var_y_tmp = da_terrain['Latitude'].values.shape[0]
var_time_tmp = dset_datetime_idx.shape[0]
var_values_period = obj_var[var_name].values
var_time_period = da_time.values
var_values_tmp = np.zeros([var_y_tmp, var_x_tmp, var_time_tmp])
var_values_tmp[:, :, :] = np.nan
for var_time_i, var_time_step in enumerate(var_time_period):
var_time_idx = dset_datetime_idx.indexer_at_time(pd.Timestamp(var_time_step))[0]
var_values_step = var_values_period[:, :, var_time_i]
var_values_tmp[:, :, var_time_idx] = var_values_step
var_da_tmp = create_darray_3d(
var_values_tmp, dset_datetime_idx, geo_x, geo_y,
coord_name_time=self.coord_name_time,
coord_name_x=self.coord_name_geo_x, coord_name_y=self.coord_name_geo_y,
dim_name_time=self.dim_name_time,
dim_name_x=self.dim_name_geo_x, dim_name_y=self.dim_name_geo_y,
dims_order=[self.dim_name_geo_y, self.dim_name_geo_x, self.dim_name_time])
obj_var = var_da_tmp.to_dataset(name=var_name)
log_stream.info(' ---------> Fill expected datasets with dynamic values ... DONE')
log_stream.info(' --------> Organize ' + var_name + ' dataset time period ... DONE')
# Organize a common dataset for all variable(s)
log_stream.info(' --------> Organize ' + var_name + ' dataset in a common dataset ... ')
if self.coord_name_geo_x not in list(var_frame.keys()):
var_frame[self.coord_name_geo_x] = geo_x
if self.coord_name_geo_y not in list(var_frame.keys()):
var_frame[self.coord_name_geo_y] = geo_y
if dset_source is None:
dset_source = obj_var
else:
dset_source = dset_source.combine_first(obj_var)
log_stream.info(' --------> Organize ' + var_name + ' dataset in a common dataset ... DONE')
elif isinstance(obj_var, dict):
if dset_source is None:
dset_source = {}
if var_name == 'ALL':
if isinstance(obj_var, dict):
obj_tmp = list(obj_var.values())[0]
var_name = obj_tmp.name
dset_source[var_name] = obj_var
else:
log_stream.error(' ===> Data dynamic object is not allowed')
raise NotImplementedError('Object dynamic type is not valid')
log_stream.info(' -------> Collect ' + var_name + ' source datasets ... DONE')
else:
log_stream.warning(' ===> Variable is not available in the datasets')
log_stream.info(' -------> Collect ' + var_name + ' source datasets ... FAILED')
else:
log_stream.warning(' ===> Type datasets is not defined')
log_stream.info(' -------> Collect ' + var_name + ' source datasets ... FAILED')
var_frame[self.datasets_tag] = dset_source
return var_frame
# Method to define filename of datasets
def collect_filename(self, time_series, template_run_ref, template_run_filled,
extra_dict=None):
dset_obj = self.dset_obj
dset_lut = self.dset_lut
datetime_idx_period = time_series.index
filetype_idx_period = time_series['File_Type'].values
filegroup_values_period = time_series['File_Group'].values
fileeta_values_period = time_series['File_ETA'].values
filegroup_values_period = filegroup_values_period[~np.isnan(filegroup_values_period)]
filegroup_idx_period = filegroup_values_period.tolist()
filegroup_idx_unique = set(filegroup_idx_period)
filegroup_idx_start = [filegroup_idx_period.index(x) for x in filegroup_idx_unique]
if extra_dict is not None:
template_run_extra = extra_dict
template_keys_extra = list(template_run_extra.keys())
else:
template_run_extra = None
template_keys_extra = None
ws_vars = {}
dset_vars = {}
dset_time = {}
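# Iterate over the dataset format(s) and type(s) to collect the source filename(s) defined by the
# expected time(s) (ETA) and the filename/folder templates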
for dset_format, dset_workspace in dset_obj.items():
log_stream.info(' ------> Collect ' + dset_format + ' source filename(s) ... ')
dset_vars[dset_format] = {}
dset_time[dset_format] = {}
dset_item = dset_workspace[self.datasets_tag]
for dset_step_type, dset_step_group in zip(self.dset_list_type, self.dset_list_group):
log_stream.info(' -------> Type ' + dset_step_type + ' ... ')
if dset_format == 'TimeSeries':
filegroup_idx_select = [idx for type, idx in zip(
filetype_idx_period[filegroup_idx_start], filegroup_idx_start) if type in self.dset_list_type]
filetype_idx_select = filetype_idx_period[filegroup_idx_select]
filegroup_idx_step = None
filetype_idx_period_tmp = []
datetime_idx_period_tmp = []
for filegroup_idx_step in filegroup_idx_select:
datetime_idx_period_tmp.append(datetime_idx_period[filegroup_idx_step])
filetype_idx_period_tmp.append(filetype_idx_period[filegroup_idx_step])
break
datetime_idx_period_tmp = pd.DatetimeIndex(datetime_idx_period_tmp)
datetime_idx_select = pd.DatetimeIndex([time_series.index[filegroup_idx_step]])
eta_array_select = np.array([fileeta_values_period[filegroup_idx_step]])
else:
group_condition = time_series['File_Type'].str.contains(dset_step_group)
time_df_select = time_series[group_condition]
datetime_idx_select = time_df_select.index
eta_array_select = fileeta_values_period[group_condition]
datetime_idx_period_tmp = datetime_idx_period
filetype_idx_period_tmp = filetype_idx_period
dset_type = dset_item[dset_step_type]
if dset_type is not None:
for dset_key, dset_value in dset_type.items():
log_stream.info(' --------> Variable ' + dset_key + ' ... ')
dset_null_check = False
folder_name_raw = dset_value[self.folder_name_tag]
file_name_raw = dset_value[self.file_name_tag]
file_period = dset_value[self.var_period_tag]
file_path_list = []
file_time_list = []
file_time_ts = []
message_condition = True
if (dset_step_type == 'FOR') and (file_period == 1):
log_stream.info(' ---------> Update datasets ETA of ' + dset_step_type
+ ' with a period equal to ' + str(file_period) + ' ... ')
eta_list_recomputed = []
for datetime_idx_step in datetime_idx_select:
eta_list_recomputed.append(datetime_idx_step.strftime(time_format_algorithm))
eta_array_recomputed = np.array(eta_list_recomputed)
eta_array_select = deepcopy(eta_array_recomputed)
log_stream.info(' ---------> Update datasets ETA of ' + dset_step_type
+ ' with a period equal to ' + str(file_period) + ' ... DONE')
for datetime_idx_step, eta_array_step in zip(datetime_idx_select, eta_array_select):
if dset_null_check:
break
datetime_step = datetime_idx_step.to_pydatetime()
eta_list_step = eta_array_step.split(';')
folder_name_sel = None
file_name_sel = None
file_time_sel = None
file_time_memory = None
for eta_list_tmp in eta_list_step:
datetime_eta_step = pd.Timestamp(eta_list_tmp).to_pydatetime()
template_run_ref_step = deepcopy(template_run_ref)
template_run_filled_step = deepcopy(template_run_filled)
if dset_format == 'TimeSeries':
if template_run_extra is not None:
template_merge_ref = {**template_run_ref_step, **template_run_extra}
template_run_filled_step = {**template_run_filled_step, **template_merge_ref}
template_time_filled = dict.fromkeys(list(self.template_time.keys()), datetime_eta_step)
template_merge_filled = {**template_run_filled_step, **template_time_filled}
template_merge_ref = {**template_run_ref_step, **self.template_time}
folder_name_tmp = fill_tags2string(folder_name_raw, template_merge_ref, template_merge_filled)
file_name_tmp = fill_tags2string(file_name_raw, template_merge_ref, template_merge_filled)
if isinstance(folder_name_tmp, list) and isinstance(file_name_tmp, list):
for folder_name_tmp_step, file_name_tmp_step in zip(folder_name_tmp, file_name_tmp):
if os.path.exists(os.path.join(folder_name_tmp_step, file_name_tmp_step)):
if folder_name_sel is None:
folder_name_sel = folder_name_tmp
if file_name_sel is None:
file_name_sel = file_name_tmp
if file_time_sel is None:
file_time_sel = datetime_step
file_time_memory = datetime_eta_step
elif isinstance(folder_name_tmp, str) and isinstance(file_name_tmp, str):
folder_name_sel = folder_name_tmp
file_name_sel = file_name_tmp
file_time_sel = datetime_step
if eta_list_step.__len__() > 1:
if os.path.exists(os.path.join(folder_name_sel, file_name_sel)):
break
else:
folder_name_sel = None
file_name_sel = None
file_time_sel = None
elif (folder_name_tmp is None) or (file_name_tmp is None):
log_stream.info(' --------> Variable ' + dset_key +
' ... SKIPPED. Datasets folder or filename are null.')
dset_null_check = True
break
else:
log_stream.info(' ------> Collect ' + dset_format + ' source filename(s) ... FAILED')
log_stream.error(' ===> Collect dynamic datasets filename(s) failed!')
raise NotImplementedError('Type of folder or file name is not supported')
if isinstance(folder_name_sel, list) and isinstance(file_name_sel, list):
for folder_name_sel_step, file_name_sel_step in zip(folder_name_sel, file_name_sel):
file_path_sel = os.path.join(folder_name_sel_step, file_name_sel_step)
if file_path_sel not in file_path_list:
file_path_list.append(file_path_sel)
if file_time_memory not in file_time_ts:
file_time_list.append(file_time_sel)
file_time_ts.append(file_time_memory)
elif isinstance(folder_name_sel, str) and isinstance(file_name_sel, str):
if (folder_name_sel is not None) and (file_name_sel is not None):
file_path_list.append(os.path.join(folder_name_sel, file_name_sel))
file_time_list.append(file_time_sel)
elif (folder_name_sel is None) and (file_name_sel is None):
if message_condition:
log_stream.warning(' ===> Datasets for variable ' + dset_key + ' is empty')
message_condition = False
else:
log_stream.error(' ===> Collect dynamic datasets filename(s) failed!')
raise NotImplementedError('Type of folder or file name is not supported')
if dset_key not in dset_vars[dset_format]:
dset_vars[dset_format][dset_key] = {}
dset_vars[dset_format][dset_key] = file_path_list
else:
file_list_tmp = dset_vars[dset_format][dset_key]
file_list_tmp.extend(file_path_list)
dset_vars[dset_format][dset_key] = file_list_tmp
if dset_key not in dset_time[dset_format]:
dset_time[dset_format][dset_key] = file_time_list
else:
time_list_tmp = dset_time[dset_format][dset_key]
time_list_tmp.extend(file_time_list)
dset_time[dset_format][dset_key] = time_list_tmp
log_stream.info(' --------> Variable ' + dset_key + ' ... DONE')
log_stream.info(' -------> Type ' + dset_step_type + ' ... DONE')
else:
log_stream.info(' -------> Type ' + dset_step_type + ' ... SKIPPED. Datasets is empty')
dict_vars = dset_vars[dset_format]
dict_time = dset_time[dset_format]
df_vars = pd.DataFrame({'Time': datetime_idx_period})
df_vars['File_Type'] = filetype_idx_period
df_vars = df_vars.reset_index()
df_vars = df_vars.set_index('Time')
for (var_key, var_time), var_data in zip(dict_time.items(), dict_vars.values()):
if dset_format == 'TimeSeries':
if isinstance(var_data, list):
var_data_tmp = self.column_sep.join(var_data)
else:
var_data_tmp = var_data
var_time_tmp = [datetime_idx_period[idx_group] for idx_group, idx_type in zip(
filegroup_idx_select, filetype_idx_select) if idx_type == 'OBS']
step_n = var_time_tmp.__len__()
var_data_tmp = [var_data_tmp] * step_n
else:
var_data_tmp = var_data
var_time_tmp = var_time
time_ts = pd.DatetimeIndex(var_time_tmp)
var_ts = pd.Series(index=time_ts, data=var_data_tmp, name=var_key).fillna(value=pd.NA)
df_vars[var_key] = var_ts
ws_vars[dset_format] = df_vars
log_stream.info(' ------> Collect ' + dset_format + ' source filename(s) ... DONE')
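# Collect the model filename(s) for each dataset format by filling the filename/folder templates
# over the reference time period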
ws_model = {}
dset_model = {}
dset_time = {}
for dset_format, dset_workspace in dset_obj.items():
log_stream.info(' ------> Collect ' + dset_format + ' model filename(s) ... ')
dset_model[dset_format] = {}
dset_time[dset_format] = {}
dset_item = dset_workspace[self.model_tag]
dset_source = dset_workspace[self.datasets_tag]
folder_name_raw = dset_item[self.folder_name_tag]
file_name_raw = dset_item[self.file_name_tag]
file_var_lut = dset_lut[dset_format][self.model_tag]
if dset_format == 'TimeSeries':
filegroup_idx_select = [idx for type, idx in zip(
filetype_idx_period[filegroup_idx_start], filegroup_idx_start) if type in self.dset_list_type]
filetype_idx_select = filetype_idx_period[filegroup_idx_select]
filetype_idx_period_tmp = []
datetime_idx_period_tmp = []
for filegroup_idx_step in filegroup_idx_select:
datetime_idx_period_tmp.append(datetime_idx_period[filegroup_idx_step])
filetype_idx_period_tmp.append(filetype_idx_period[filegroup_idx_step])
break
datetime_idx_period_tmp = pd.DatetimeIndex(datetime_idx_period_tmp)
else:
filegroup_idx_select = [idx for idx, type in enumerate(filetype_idx_period)
if type in self.dset_list_group]
filetype_idx_period_tmp = []
datetime_idx_period_tmp = []
for filegroup_idx_step in filegroup_idx_select:
datetime_idx_period_tmp.append(datetime_idx_period[filegroup_idx_step])
filetype_idx_period_tmp.append(filetype_idx_period[filegroup_idx_step])
file_path_list = []
file_time_list = []
file_path_merged = []
for datetime_idx_step in datetime_idx_period_tmp:
datetime_step = datetime_idx_step.to_pydatetime()
template_run_ref_step = deepcopy(template_run_ref)
template_run_filled_step = deepcopy(template_run_filled)
template_time_filled = dict.fromkeys(list(self.template_time.keys()), datetime_step)
template_merge_filled = {**template_run_filled_step, **template_time_filled}
template_merge_ref = {**template_run_ref_step, **self.template_time}
if dset_format == 'Point':
template_merge_filled['dset_var_name_forcing_point'] = file_var_lut
if template_keys_extra is not None:
for template_key_extra in template_keys_extra:
template_merge_filled[template_key_extra] = None
if dset_format == 'TimeSeries':
if template_run_extra is not None:
template_merge_filled = {**template_merge_filled, **template_run_extra}
folder_name_tmp = fill_tags2string(folder_name_raw, template_merge_ref, template_merge_filled)
file_name_tmp = fill_tags2string(file_name_raw, template_merge_ref, template_merge_filled)
if isinstance(folder_name_tmp, list) and isinstance(file_name_tmp, list):
for folder_name_tmp_step, file_name_tmp_step in zip(folder_name_tmp, file_name_tmp):
file_path_tmp = os.path.join(folder_name_tmp_step, file_name_tmp_step)
if file_path_tmp not in file_path_list:
file_path_list.append(file_path_tmp)
if datetime_idx_step not in file_time_list:
file_time_list.append(datetime_idx_step)
elif isinstance(folder_name_tmp, str) and isinstance(file_name_tmp, str):
file_path_list.append(os.path.join(folder_name_tmp, file_name_tmp))
file_time_list.append(datetime_idx_step)
else:
log_stream.error(' ===> Collect dynamic model filename(s) failed!')
raise NotImplementedError('Type of folder or file name is not supported')
if dset_format == 'Point':
if isinstance(file_path_list, list):
file_path_tmp = self.column_sep.join(file_path_list)
file_path_merged.append(file_path_tmp)
file_path_list = []
else:
file_path_merged = deepcopy(file_path_list)
else:
file_path_merged = deepcopy(file_path_list)
dset_model[dset_format][self.model_tag] = {}
dset_model[dset_format][self.model_tag] = file_path_merged
dset_time[dset_format][self.model_tag] = {}
dset_time[dset_format][self.model_tag] = file_time_list
dict_model = dset_model[dset_format]
dict_time = dset_time[dset_format]
df_model = pd.DataFrame({'Time': datetime_idx_period})
df_model['File_Type'] = filetype_idx_period
df_model = df_model.reset_index()
df_model = df_model.set_index('Time')
for (var_key, var_time), var_data in zip(dict_time.items(), dict_model.values()):
if dset_format == 'TimeSeries':
if isinstance(var_data, list):
var_data_tmp = self.column_sep.join(var_data)
else:
var_data_tmp = var_data
var_time_tmp = [datetime_idx_period[idx_group] for idx_group, idx_type in zip(
filegroup_idx_select, filetype_idx_select) if idx_type == 'OBS']
step_n = var_time_tmp.__len__()
var_data_tmp = [var_data_tmp] * step_n
else:
var_data_tmp = var_data
var_time_tmp = var_time
time_ts = | pd.DatetimeIndex(var_time_tmp) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 11:33:55 2020
@author: User
"""
import sys
from pathlib import Path
import functools
# import collections
from collections import Counter
import pickle
# import types
# import post_helper
# import plotting
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import linregress, zscore
import pandas as pd
import numpy as np
import datetime as dt
mpl.style.use("seaborn")
mpl.rcParams["figure.dpi"] = 100
# from sklearn.cluster import KMeans
# print ('Name prepare input:', __name__ )
if __name__ == "__main__":
# print(f'Package: {__package__}, File: {__file__}')
# FH_path = Path(__file__).parent.parent.parent.joinpath('FileHelper')
# sys.path.append(str(FH_path))
# sys.path.append(str(Path(__file__).parent.parent.joinpath('indexer')))
sys.path.append(str(Path(__file__).parent.parent.parent))
# sys.path.append("..")
# print(sys.path)
# import FileHelper
from FileHelper.PostChar import Characterization_TypeSetting, SampleCodesChar
from FileHelper.PostPlotting import *
from FileHelper.FindSampleID import GetSampleID
from FileHelper.FindFolders import FindExpFolder
# from FileHelper.FileFunctions.FileOperations import PDreadXLorCSV
from collect_load import Load_from_Indexes, CollectLoadPars
# from FileHelper.FindExpFolder import FindExpFolder
from plotting import eisplot
from prep_postchar import postChar
import EIS_export
elif "prepare_input" in __name__:
pass
# import RunEC_classifier
# from FileHelper.FindSampleID import FindSampleID
import logging
_logger = logging.getLogger(__name__)
# from FileHelper.PostChar import SampleSelection, Characterization_TypeSetting
def mkfolder(folder):
folder.mkdir(exist_ok=True, parents=True)
return folder
def filter_cols(_df, n):
if any(["startswith" in i for i in n]):
_lst = [i for i in _df.columns if i.startswith(n[-1])]
else:
_lst = [i for i in _df.columns if n[-1] in i]
return _lst
OriginColors = Characterization_TypeSetting.OriginColorList()
Pfolder = FindExpFolder().TopDir.joinpath(
Path("Preparation-Thesis/SiO2_projects/SiO2_Me_ECdepth+LC")
)
plotsfolder = mkfolder(Pfolder.joinpath("correlation_plots"))
EC_folder = Pfolder.joinpath("EC_data")
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
print("finished")
# SampleCodesChar().load
def multiIndex_pivot(df, index=None, columns=None, values=None):
# https://github.com/pandas-dev/pandas/issues/23955
output_df = df.copy(deep=True)
if index is None:
names = list(output_df.index.names)
output_df = output_df.reset_index()
else:
names = index
output_df = output_df.assign(
tuples_index=[tuple(i) for i in output_df[names].values]
)
if isinstance(columns, list):
output_df = output_df.assign(
tuples_columns=[tuple(i) for i in output_df[columns].values]
) # hashable
output_df = output_df.pivot(
index="tuples_index", columns="tuples_columns", values=values
)
output_df.columns = pd.MultiIndex.from_tuples(
output_df.columns, names=columns
) # reduced
else:
output_df = output_df.pivot(
index="tuples_index", columns=columns, values=values
)
output_df.index = pd.MultiIndex.from_tuples(output_df.index, names=names)
return output_df
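# Illustrative usage (hypothetical column names):
#   df = pd.DataFrame({'SampleID': ['JOS1', 'JOS2'], 'Gas': ['N2', 'O2'], 'value': [0.1, 0.2]})
#   wide = df.pipe(multiIndex_pivot, index=['SampleID'], columns=['Gas'], values='value')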
def get_float_cols(df):
return [key for key, val in df.dtypes.to_dict().items() if "float64" in str(val)]
def cm2inch(value):
return value / 2.54
# class PorphSamples():
# def __init__(self):
# self.template = PorphSamples.template()
def decorator(func):
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
# Do something before
value = func(*args, **kwargs)
# Do something after
return value
return wrapper_decorator
def read_load_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
DF_diff = pd.read_pickle(_pklpath)
DF_diff.columns
return DF_diff
except Exception as e:
print("reading error", e)
return pd.DataFrame()
else:
print("read error not existing", _pklpath)
return pd.DataFrame()
def save_DF_pkl(_pklstem, _DF):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
try:
print("pkl saving to:", _pklpath)
_DF.to_pickle(_pklpath)
except Exception as e:
print("pkl saving error", e, _pklpath)
return _pklpath
def load_dict_pkl(_pklstem):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
if _pklpath.exists():
try:
print("pkl reloading:", _pklpath)
with open(_pklpath, "rb") as file:
_dict = pickle.load(file)
return _dict
except Exception as e:
print("reading error", e)
return {}
else:
print("read error not existing", _pklpath)
return {}
def save_dict_pkl(_pklstem, _dict):
_pklpath = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
try:
print("pkl saving to:", _pklpath)
with open(_pklpath, "wb") as file:
pickle.dump(_dict, file)
except Exception as e:
print("pkl saving error", e, _pklpath)
return _pklpath
def PorphSiO2_template():
# 'SerieIDs' : ('Porph_SiO2')*5,
Series_Porph_SiO2 = {
"SampleID": ("JOS1", "JOS2", "JOS3", "JOS4", "JOS5"),
"Metal": ("Fe", "Co", "MnTPP", "FeTPP", "H2"),
"color": (2, 4, 6, 15, 3),
}
Porphyrins = {
"TMPP": {"Formula": "C48H38N4O4", "MW": 734.8382},
"TMPP-Fe(III)Cl": {"Formula": "C48H36ClFeN4O4", "MW": 824.1204},
"TMPP-Co(II)": {"Formula": "C48H36CoN4O4", "MW": 791.7556},
"TTP-Mn(III)Cl": {"Formula": "C44H28ClMnN4", "MW": 703.1098},
"TPP-Fe(III)Cl": {"Formula": "C44H28ClFeN4", "MW": 704.0168},
"TPP": {"Formula": "C44H30N4", "MW": 614.7346},
}
Porph_template = pd.DataFrame(Series_Porph_SiO2)
return Porph_template
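# PorphSiO2_template() gives the 5-sample metadata frame (JOS1..JOS5 with central metal and an
# Origin color index) that is used below to subset EC_index and to label merged parameter tables.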
def EC_types_grp():
# KL ['ORR_E_AppV_RHE', 'ORR_KL_E_AppV_RHE','Electrode']
_basic_EC_cond = ["postAST_post", "Sweep_Type", "pH", "Loading_cm2"]
_extra_EC_cond = {
"N2CV": [],
"N2": [],
"ORR": ["RPM_DAC_uni"],
"KL": ["Electrode", "ORR_E_AppV_RHE"],
"EIS": ["E_RHE"],
"HER": ["HER_RPM_post"],
"OER": [],
}
_out = {key: _basic_EC_cond + val for key, val in _extra_EC_cond.items()}
return _out
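# EC_types_grp() maps each experiment type to the columns used for grouping/matching rows, e.g.
# EC_types_grp()["EIS"] -> ["postAST_post", "Sweep_Type", "pH", "Loading_cm2", "E_RHE"].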
def save_EC_index_PorphSiO2(EC_index, EC_folder):
_porph_index = EC_index.loc[EC_index.SampleID.isin(PorphSiO2_template().SampleID)]
_porph_index.to_excel(EC_folder.joinpath("EC_index_PorphSiO2.xlsx"))
# save_EC_index_PorphSiO2(EC_index, EC_folder)
class EC_PorphSiO2:
folder = FindExpFolder("PorphSiO2").compare
Porph_template = PorphSiO2_template()
# globals EC_index
# ['Model(Singh2015_RQRQ)', 'Model(Singh2015_RQRQR)', 'Model(Bandarenka_2011_RQRQR)',
# 'Model(Singh2015_RQRWR)', 'Model(Randles_RQRQ)', 'Model(Singh2015_R3RQ)']
# model_select = EC_PorphSiO2.EIS_models[1]
# self = EC_PorphSiO2()
def __init__(self):
# self.index, self.AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
self.select_EC_ASTexps_from_ECindex()
# self.pars = EC_PorphSiO2.mergedEC()
# self.par_export = EC_OHC.to_excel(self.folder.joinpath('EC_ORR_HPRR.xlsx'))
def select_EC_ASTexps_from_ECindex(self):
EC_idx_PorphSiO2_samples = EC_index.loc[
EC_index.SampleID.isin(self.Porph_template.SampleID.unique())
]
# pd.read_excel(list(EC_folder.rglob('*EC_index*'))[0])
EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(np.datetime_as_string(np.datetime64(i, "D")))
for i in EC_idx_PorphSiO2_samples.PAR_date.to_numpy()
]
}
)
self.EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples
self.get_AST_days()
# LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
EC_idx_PorphSiO2_AST = EC_idx_PorphSiO2_samples.loc[
EC_idx_PorphSiO2_samples.PAR_date_day_dt.isin(
[i for a in self.AST_days.to_numpy() for i in a]
)
]
# AST_days = EC_PorphSiO2.get_AST_days()
# EC_idx_PorphSiO2_AST.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
self.EC_idx_PorphSiO2 = EC_idx_PorphSiO2_AST
# if LC_idx_fp.exists():
# else:
# try:
# LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
# except Exception as e:
# print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
# LC_fls = pd.DataFrame()
# return LC_fls, AST_days
def get_AST_days(self):
gr_idx = self.EC_idx_PorphSiO2_samples.groupby("PAR_date_day_dt")
AST_days = []
for n, gr in gr_idx:
# n,gr
exps = gr.PAR_exp.unique()
# gr.PAR_date_day.unique()[0]
if any(["AST" in i for i in exps]):
# print(n,exps)
# AST_days.append(n)
if n + dt.timedelta(1) in gr_idx.groups.keys():
_post = gr_idx.get_group(n + dt.timedelta(1))
# print(n + dt.timedelta(1), gr_idx.get_group(n + dt.timedelta(1)))
AST_days.append((n, n + dt.timedelta(1)))
else:
AST_days.append((n, n))
print(n + dt.timedelta(1), "grp missing")
# (AST_days[-1][0], AST_days[0][1])
# AST_days.append((dt.date(2019,5,6), dt.date(2019,1,25)))
# AST_days.append((dt.date(2019,5,6), dt.date(2019,1,26)))
_extra_AST_days = [
(dt.date(2019, 5, 6), dt.date(2019, 1, 25)),
(dt.date(2019, 5, 6), dt.date(2019, 1, 26)),
]
AST_days += _extra_AST_days
AST_days = pd.DataFrame(
AST_days, columns=["PAR_date_day_dt_pre", "PAR_date_day_dt_post"]
)
AST_days = AST_days.assign(
**{
"PAR_date_day_dt_diff": AST_days.PAR_date_day_dt_pre
- AST_days.PAR_date_day_dt_post
}
)
self.AST_days = AST_days
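        # self.AST_days holds (pre, post) date pairs: the "post" day is normally the day after
        # an AST experiment day, falling back to the AST day itself when no follow-up group exists.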
# def select_ECexps(EC_folder):
# LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
# AST_days = EC_PorphSiO2.get_AST_days()
# if LC_idx_fp.exists():
# LC_fls = EC_PorphSiO2.EC_idx_PorphSiO2.loc[EC_PorphSiO2.EC_idx_PorphSiO2.PAR_date_day_dt.isin([i for a in AST_days.to_numpy() for i in a])]
# LC_fls.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
# else:
# try:
# LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
# except Exception as e:
# print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
# LC_fls = pd.DataFrame()
# return LC_fls, AST_days
# def repr_index(self):
# PAR_exp_uniq = {grn : len(grp) for grn,grp in self.index.groupby("PAR_exp")}
# print(f'Len({len(self.index)},\n{PAR_exp_uniq}')
def _testing_():
tt = EC_prepare_EC_merged(reload_AST=True, reload_merged=True, reload_pars=True)
self = tt
N2CV = self.N2cv(reload=False, use_daily=True)
#%% == EC_prepare_EC_merged == testing
class EC_prepare_EC_merged:
EIS_models = EIS_export.EIS_selection.mod_select
# ['Model(EEC_Randles_RWpCPE)', 'Model(EEC_2CPE)', 'Model(EEC_2CPEpW)',
# 'Model(EEC_RQ_RQ_RW)', 'Model(EEC_RQ_RQ_RQ)', 'Model(Randles_RQRQ)']
ORR_reload = dict(reload=True, use_daily=False)
ORR_no_reload = dict(reload=False, use_daily=True)
use_daily = True
# global ParsColl
# ParsColl = ParsColl
mcols = [i for i in Load_from_Indexes.EC_label_cols if i not in ["PAR_file"]] + [
"Sweep_Type"
]
_pkl_EC_merged = "EC_merged_dict"
def __init__(self, reload_AST=False, reload_merged=False, reload_pars=True):
self.reload_AST = reload_AST
self.reload_merged = reload_merged
self.reload_pars = reload_pars
self.set_pars_collection()
self.reload_pars_kws = dict(reload=reload_pars, use_daily=self.use_daily)
self.EC_merged_dict = {}
self.load_EC_PorphSiO2()
self.load_merged_EC()
def set_pars_collection(self):
if "ParsColl" in globals().keys():
self.ParsColl = ParsColl
else:
Pars_Collection = CollectLoadPars(load_type="fast")
# globals()['Pars_Collection'] = Pars_Collection
ParsColl = Pars_Collection.pars_collection
self.ParsColl = ParsColl
def load_EC_PorphSiO2(self):
self.EC_PorphSiO2 = EC_PorphSiO2()
self.AST_days = self.EC_PorphSiO2.AST_days
self.EC_idx_PorphSiO2 = self.EC_PorphSiO2.EC_idx_PorphSiO2
def load_merged_EC(self):
if self.reload_merged:
self.reload_merged_EC()
if not self.EC_merged_dict:
_load_EC_merge = load_dict_pkl(self._pkl_EC_merged)
if _load_EC_merge:
self.EC_merged_dict = _load_EC_merge
def reload_merged_EC(self):
try:
self.load_N2CV()
self.load_ORR()
self.load_KL()
self.load_EIS()
self.load_HER()
self.add_filter_selection_of_EC_merged()
save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
except Exception as e:
_logger.warning(f"EC_prepare_EC_merged, reload_merged_EC failure: {e}")
def get_AST_matches(self, DF, _verbose=False):
# LC_fls, AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
# DF = ORR.drop_duplicates()
# DF = N2CV.drop_duplicates()
# DF = EIS.drop_duplicates()
# DF = HER.drop_duplicates()
# DF = ttpars
if "PAR_date_day_dt" not in DF.columns:
DF = DF.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(
np.datetime_as_string(np.datetime64(i, "D"))
)
for i in DF.PAR_date.to_numpy()
]
}
)
DF.PAR_date_day_dt = pd.to_datetime(DF.PAR_date_day_dt, unit="D")
# list((set(DF.columns).intersection(set(LC_fls.columns))).intersection(set(mcols) ))
# DF = pd.merge(DF,LC_fls,on=)
_compare_cols = [
i for i in ["SampleID", "pH", "Gas", "Loading_cm2"] if i in DF.columns
]
_swp_rpm = [
"Sweep_Type",
"RPM_DAC_uni" if "RPM_DAC_uni" in DF.columns else "RPM_DAC",
]
_coll = []
# AST_days_run_lst = [i for i in AST_days if len(i) == 2][-1:]
for n, r in self.AST_days.iterrows():
# if len(_dates) == 2:
# _pre,_post = _dates
# elif (len_dates) == 1:
_pre, _post = r.PAR_date_day_dt_pre, r.PAR_date_day_dt_post
_preslice = DF.loc[
(DF.PAR_date_day == _pre.strftime("%Y-%m-%d")) & (DF.postAST == "no")
]
pre = _preslice.groupby(_compare_cols)
_postslice = DF.loc[
(DF.PAR_date_day == _post.strftime("%Y-%m-%d")) & (DF.postAST != "no")
]
post = _postslice.groupby(_compare_cols)
_res = {}
_res = {
"pre_PAR_date_day_dt": _pre,
"post_PAR_date_day_dt": _post,
"AST_days_n": n,
}
# print(_res,[_preslice.postAST.unique()[0], _postslice.postAST.unique()[0]])
union = set(pre.groups.keys()).union(set(post.groups.keys()))
matches = set(pre.groups.keys()).intersection(set(post.groups.keys()))
_difference_pre = set(pre.groups.keys()).difference(set(post.groups.keys()))
_difference_post = set(post.groups.keys()).difference(
set(pre.groups.keys())
)
# _diffr.append((_pre,_post,_difference_pre, _difference_post))
if not _preslice.empty and not _postslice.empty:
for match in union:
_res.update(dict(zip(_compare_cols, match)))
_mgrpcols = ["PAR_file", "dupli_num", "postAST"]
if match in matches:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
elif match in _difference_pre:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = pre.get_group(match).groupby(_mgrpcols)
elif match in _difference_post:
_mpre = post.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
# print(_mpost.groups)
for (_prePF, npr, _preAST), prgrp in _mpre:
_res.update(
{
"pre_dupli_num": npr,
"pre_PAR_file": _prePF,
"pre_postAST": _preAST,
}
)
for (_poPF, npo, _postAST), pogrp in _mpost:
_res.update(
{
"post_dupli_num": npo,
"post_PAR_file": _poPF,
"post_postAST": _postAST,
"dupli_num_combo": f"{npr}, {npo}",
}
)
if _postAST in "postAST_sHA|postAST_LC" and _verbose:
print(_res)
_pr1 = prgrp.groupby(_swp_rpm)
_po1 = pogrp.groupby(_swp_rpm)
_rpmswp_matches = set(_pr1.groups.keys()).intersection(
set(_po1.groups.keys())
)
for _m in _rpmswp_matches:
_res.update(dict(zip(_swp_rpm, _m)))
# print(_res)
_coll.append(_res.copy())
AST_matches = pd.DataFrame(_coll)
return AST_matches
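        # AST_matches has one row per matched pre/post measurement pair: PAR_file, dupli_num and
        # postAST labels of both sides plus the shared conditions (SampleID, pH, Gas, Loading_cm2)
        # and, where available, Sweep_Type/RPM.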
# prgrp.groupby(['Sweep_Type','RPM_DAC']).groups
# prgrp['ORR_Jkin_min_700']-pogrp['ORR_Jkin_min_700']
def load_N2CV(self):
N2CV = self.edit_pars_N2cv(**self.reload_pars_kws)
# N2_pltqry = EC_merged_dict.get('N2CV')
N2_AST = self.get_AST_matches(N2CV)
N2_AST_diff = self.compare_AST_pars(N2CV, N2_AST, reload=self.reload_AST)
# _DFtype = EC_PorphSiO2.sense_DF_type(N2CV)
# EC_merged_dict.update({'N2CV' : N2_AST_diff})
self.EC_merged_dict.update(
{"N2CV": {"PARS": N2CV, "AST_matches": N2_AST, "AST_diff": N2_AST_diff}}
)
def load_ORR(self, _testing=False):
ORR = self.edit_pars_ORR()
ORR_AST = self.get_AST_matches(ORR)
ORR_AST_diff = self.compare_AST_pars(ORR, ORR_AST, reload=self.reload_AST)
if _testing:
ttpars = ORR.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
tt_AST = self.get_AST_matches(ttpars)
tt = ORR_AST.query('RPM_DAC_uni > 1000 & Sweep_Type == "cathodic"')
            tt_diff = self.compare_AST_pars(ORR, tt, reload=self.reload_AST, save_pkl=False)
# ttpfs = ORR.loc[ORR.ORR_Jkin_max_700 > 0].PAR_file.unique()
# ttpfs = ORR.query('Sweep_Type == "mean"').loc[ORR.ORR_E_onset > 0.85].PAR_file.unique()
# ORR.loc[(ORR.ORR_E_onset > 0.85) & (ORR.Sweep_Type == 'cathodic')].PAR_file.unique()
# EC_merged_dict.update({'ORR' : ORR_AST_diff})
self.EC_merged_dict.update(
{"ORR": {"PARS": ORR, "AST_matches": ORR_AST, "AST_diff": ORR_AST_diff}}
)
def load_KL(self):
KL = self.edit_pars_KL()
KL = KL.assign(**{"RPM_DAC": 1500})
KL_AST = self.get_AST_matches(KL)
KL_AST_diff = self.compare_AST_pars(KL, KL_AST, reload=self.reload_AST)
# EC_merged_dict.update({'KL' : KL_AST_diff})
self.EC_merged_dict.update(
{"KL": {"PARS": KL, "AST_matches": KL_AST, "AST_diff": KL_AST_diff}}
)
def load_EIS(self):
EIS = self.edit_pars_EIS()
EIS_AST = self.get_AST_matches(EIS)
EIS_AST_diff = self.compare_AST_pars(EIS, EIS_AST, reload=self.reload_AST)
# EC_merged_dict.update({'EIS' : EIS_AST_diff})
self.EC_merged_dict.update(
{"EIS": {"PARS": EIS, "AST_matches": EIS_AST, "AST_diff": EIS_AST_diff}}
)
def load_HER(self):
HER = self.edit_pars_HER()
HER_type_grp = HER.groupby("HER_type")
HER.HER_at_E_slice = HER.HER_at_E_slice.round(3)
HER_AST = self.get_AST_matches(HER)
for Htype, Hgrp in HER_type_grp:
# Htype, Hgrp = 'E_slice', HER.loc[HER.groupby('HER_type').groups['E_slice']]
HER_AST_diff = self.compare_AST_pars(
Hgrp, HER_AST, reload=self.reload_AST, extra=Htype
)
try:
if not HER_AST_diff.empty:
self.EC_merged_dict.update(
{
f"HER_{Htype}": {
"PARS": Hgrp,
"AST_matches": HER_AST,
"AST_diff": HER_AST_diff,
}
}
)
except Exception as e:
print(f"HER {Htype} fail, {e}")
# EC_merged_dict.update({f'HER_{Htype}' : HER_AST_diff})
    def finish_EC_merged(self):
        self.add_filter_selection_of_EC_merged()
        save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
        # self.EC_merged_dict = load_dict_pkl(self._pkl_EC_merged)
def add_filter_selection_of_EC_merged(self):
_drop_AST_row_pre = [
"2019-01-25;N2_20cls_300_100_10_JOS5_256;no;0",
"2019-01-25;N2_20cls_300_100_10_JOS4_256;no;0",
]
_check_cols = [
"SampleID",
"AST_row",
"PAR_date_day_dt_pre",
"PAR_date_day_dt_post",
"postAST_post",
]
_srt2 = ["postAST_post", "SampleID"]
_ch_match = [
"SampleID",
"pre_PAR_date_day_dt",
"post_PAR_date_day_dt",
"post_postAST",
"pre_postAST",
]
_sortcols = ["SampleID", "post_postAST"][::-1]
pd.set_option("display.max_columns", 6)
pd.set_option("display.width", 100)
for _EC, _DF in self.EC_merged_dict.items():
# _EC, _DF = 'N2CV', EC_merged_dict['N2CV']
# _EC, _DF = 'ORR', EC_merged_dict['ORR']
# print(_EC)
if "AST_row_n" not in _DF["AST_diff"].columns:
_DF["AST_diff"]["AST_row_n"] = [
int(i[-1]) for i in _DF["AST_diff"].AST_row.str.split("_").values
]
AST_diff = _DF["AST_diff"].copy()
            AST_diff = AST_diff.loc[~AST_diff.AST_row_pre.isin(_drop_AST_row_pre)]
AST_matches = (
_DF["AST_matches"].copy().sort_values(by=["post_postAST", "SampleID"])
)
_rem1 = AST_matches.loc[
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.SampleID.isin(["JOS2", "JOS4", "JOS5"]))
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 1, 25))
].assign(**{"rem": 1})
_rem2 = AST_matches.loc[
(
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.pre_postAST == "no")
& (
AST_matches.SampleID.isin(
["JOS1", "JOS2", "JOS3", "JOS4", "JOS5"]
)
)
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 5, 6))
& (
AST_matches.post_PAR_date_day_dt.isin(
[dt.date(2019, 1, 25), dt.date(2019, 1, 26)]
)
)
)
].assign(**{"rem": 2})
# _keep_clean.loc[2].to_dict()
# _jos3 = {'SampleID': 'JOS3', 'pre_PAR_date_day_dt': dt.date(2019, 1, 24), 'post_PAR_date_day_dt': dt.date(2019, 1, 25),
# 'post_postAST': 'postAST_LC', 'pre_postAST': 'no'}
# _jos3qry = ' & '.join([f'{k} == {repr(val)}' for k,val in _jos3.items()])
# AST_matches.query(_jos3qry)
_rem3 = AST_matches.loc[
(
(AST_matches.post_postAST == "postAST_LC")
& (AST_matches.pre_postAST == "no")
& (AST_matches.SampleID.isin(["JOS3"]))
& (AST_matches.pre_PAR_date_day_dt == dt.date(2019, 1, 24))
& (AST_matches.post_PAR_date_day_dt == dt.date(2019, 1, 25))
)
].assign(**{"rem": 3})
_rem4 = AST_matches.loc[(AST_matches.pre_postAST != "no")].assign(
**{"rem": 4}
)
_edit = _rem1 # changed later 15.03
_remove = pd.concat([_rem2, _rem4, _rem3])
_keep = AST_matches.iloc[~AST_matches.index.isin(_remove.index.values)]
AST_matches[_ch_match].drop_duplicates()
_rem_clean = _remove[_ch_match + ["rem"]].sort_values(by=_sortcols)
_keep_clean = _keep[_ch_match].sort_values(by=_sortcols)
# _remove[['SampleID','post_postAST']] # check
# _rem = _DF['AST_diff'].loc[_DF['AST_diff']['AST_row_n'].isin(_remove.index.values)]
# _rem[['SampleID','postAST_post','PAR_date_day_pre']] #check
_filtered = AST_diff.loc[~AST_diff["AST_row_n"].isin(_remove.index.values)]
# DF['AST_diff'] = _filtered
self.EC_merged_dict.update({_EC: {**_DF, **{"AST_diff_filter": _filtered}}})
print(
f'EC merged dict updated with dropped rows in "AST_diff_filter" for:\n {self.EC_merged_dict.keys()}'
)
# return EC_merged_dict
# _DF['AST_diff'].loc[_DF['AST_diff'].AST_row_n.isin(_rem]
def EC_merge_postchar(_reloadset=False):
_pkl_EC_postchar = "EC_merged_postchars"
EC_postchars = load_dict_pkl(_pkl_EC_postchar)
        if not EC_postchars or _reloadset:
EC_merged_dict = EC_PorphSiO2.mergedEC(_reloadset=True)
# EC_merged_dict_bak = EC_merged_dict.copy()
EC_merged_dict = EC_PorphSiO2.add_filter_selection_of_EC_merged(
EC_merged_dict
)
postChars = postChar().merged
_extracols = [i for i in SampleCodes.columns if not "SampleID" in i]
EC_postchars = {}
for _EC, _DF_dict in EC_merged_dict.items():
_DF = _DF_dict["AST_diff_filter"]
_initcols = _DF.columns
_DF = _DF.dropna(axis=1, how="all")
_DF = _DF.drop(columns=_DF.columns.intersection(_extracols))
_DF = pd.merge(_DF, postChars, on="SampleID")
_postcols = _DF.columns
EC_postchars.update({_EC: _DF})
save_dict_pkl(_pkl_EC_postchar, EC_postchars)
return EC_postchars
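    # EC_merge_postchar merges each filtered AST-difference table with the post-characterization
    # data (postChar().merged) on SampleID and caches the combined dict as a pickle.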
def _fix_ORR_scans():
EC_postchars = EC_PorphSiO2.EC_merge_postchar(_reloadset=True)
_ORR = EC_postchars["ORR"]
_J245 = _ORR.loc[
            _ORR.SampleID.isin(["JOS2", "JOS4", "JOS5"])
& (_ORR.postAST_post == "postAST_LC")
]
_extracols = [i for i in SampleCodes.columns if not "SampleID" in i]
def compare_AST_pars(self, _DF, _AST_in, reload=False, extra="", save_pkl=True):
# _DF, _AST_in = EIS, EIS_AST
# _DF, _AST_in = N2CV, N2_AST
# _DF, _AST_in = ORR, ORR_AST
# _DF, _AST_in = KL, KL_AST
# _DF, _AST_in = HER, HER_AST
# _DF, _AST_in = Hgrp, HER_AST
# _DF, _AST_in = ttN2CV, ttAST
# reload, extra = _reloadset, Htype
_DF = _DF.drop_duplicates()
_DFtype = self.sense_DF_type(_DF)
_DFtype = "".join([i for i in _DFtype if str.isalpha(i)])
_DFtype_prefix = _DFtype.split("_")[0]
if extra:
_pklpath = EC_PorphSiO2.folder.joinpath(
f"AST_compared_pars_{_DFtype}_{extra}.pkl"
)
else:
_pklpath = EC_PorphSiO2.folder.joinpath(f"AST_compared_pars_{_DFtype}.pkl")
if _pklpath.exists() and not reload:
try:
print("AST compare reading:", _pklpath)
DF_diff = pd.read_pickle(_pklpath)
return DF_diff
except Exception as e:
return print("reading error", e)
else:
_prec = [i for i in _AST_in.columns if not i.startswith("post")]
_precols = [
i.split("pre_")[-1] if i.startswith("pre") else i for i in _prec
]
_post = [i for i in _AST_in.columns if not i.startswith("pre")]
_postcols = [
i.split("post_")[-1] if i.startswith("post") else i for i in _post
]
_dropnacols = set(_post + _prec)
list(set(_prec).intersection(set(_post)))
_AST = _AST_in.dropna(subset=_dropnacols, how="any")
# _AST = _AST_in.loc[(_AST_in.SampleID == "JOS4") ]
# & (_AST_in.post_postAST.str.contains('LC'))]
_DF_diff_out = []
_errors = []
_dms = []
# _AST.loc[_AST.SampleID == "JOS4"].tail(2)
for n, r in _AST.iterrows():
# r[_dropnacols]
_uniq_AST_row_pre = f"{r.pre_PAR_date_day_dt};{Path(r.pre_PAR_file).stem};{r.pre_postAST};{int(r.pre_dupli_num)}"
_uniq_AST_row_post = f"{r.post_PAR_date_day_dt};{Path(r.post_PAR_file).stem};{r.post_postAST};{int(r.post_dupli_num)}"
# EIS.query(' and '.join([f'{k} == {repr(v)}' for k, v in _pred.items()]))
_pred = dict(zip(_precols, r[_prec].to_dict().values()))
_preQ = " & ".join(
[f"{k} == {repr(v)}" for k, v in _pred.items() if k in _DF.columns][
1:
]
)
_Dpre = _DF.query(_preQ).dropna(axis=1, how="all")
_postd = dict(zip(_postcols, r[_post].to_dict().values()))
_postQ = " & ".join(
[
f"{k} == {repr(v)}"
for k, v in _postd.items()
if k in _DF.columns
][1:]
)
_Dpos = _DF.query(_postQ).dropna(axis=1, how="all")
_dms.append((n, _pred, _postd))
# pd.merge(_Dpre,_Dpos)
_0 = [
(i, _Dpre[i].unique()[0])
for i in _Dpre.columns
if _Dpre[i].nunique() <= 1 and not i.startswith(_DFtype_prefix)
]
_1 = [
(i, _Dpos[i].unique()[0])
for i in _Dpos.columns
if _Dpos[i].nunique() <= 1 and not i.startswith(_DFtype_prefix)
]
# _dms.append((n, len(_Dm), _Dm ))
_mcols = [
i[0]
for i in set(_0).intersection(set(_1))
if not i[0].startswith("dupli")
]
_mcols = [
i
for i in _mcols
if i not in ["PAR_exp", "Dest_dir"] and not i.startswith("EXP_")
]
_mcols.sort()
_othercols = _Dpos.columns.difference(_mcols)
t2 = _Dpos[_othercols]
if "EIS" in _DFtype and all(
["E_RHE" in i for i in [_Dpre.columns, _Dpos.columns]]
):
_mcols += ["E_RHE"]
# _Dm = pd.merge(_Dpre,_Dpos,on=_mcols + ['E_RHE'],suffixes=['_pre','_post'])
elif "ORR" in _DFtype:
_KLcols = ["ORR_E_AppV_RHE", "ORR_KL_E_AppV_RHE", "Electrode"]
if all(i in _othercols for i in _KLcols):
_mcols += _KLcols
# _Dm = pd.merge(_Dpre, _Dpos, on = _mcols, suffixes = ['_pre','_post'])
elif "HER" in _DFtype:
_addcols = [
i
for i in [
"HER_type",
"HER_at_J_slice",
"HER_at_E_slice",
"HER_Segnum",
]
if i in set(_Dpre.columns).union(_Dpos.columns)
]
_mcols += _addcols
_Dm = pd.merge(_Dpre, _Dpos, on=_mcols, suffixes=["_pre", "_post"])
_Dm = _Dm.assign(
**{
"AST_row": f"{_DFtype}_{n}",
"AST_row_n": int(n),
"AST_days_n": r.AST_days_n,
"AST_row_pre": _uniq_AST_row_pre,
"AST_row_post": _uniq_AST_row_post,
}
)
# [(i, _Dpos[i].nunique(), _Dpos[i].unique()[0], _Dpre[i].nunique(), _Dpre[i].unique()[0], (_Dpos[i].unique(),_Dpre[i].unique()))
# for i in _mcols if _Dpos[i].nunique() > 1]
if _Dm.empty:
                        raise ValueError(f"Empty AST merge for row {n}; check merge columns: {_mcols}")
# try:
# _Dm = pd.merge_asof(_Dpre.sort_values(_mcols), _Dpos.sort_values(_mcols), on = _mcols, suffixes = ['_pre','_post'])
_parcols = [
(i, i.replace("_pre", "_post"))
for i in _Dm.columns
if i.startswith(_DFtype_prefix)
and i.endswith("_pre")
and i.replace("_pre", "_post") in _Dm.columns
]
for _c0, _c1 in _parcols:
try:
_diffabs = _Dm[_c0] - _Dm[_c1]
_diffperc = 100 * (_Dm[_c1] - _Dm[_c0]) / _Dm[_c0]
_Dm = _Dm.assign(
**{
_c0.split("_pre")[0] + "_diff_abs": _diffabs,
_c0.split("_pre")[0] + "_diff_perc": _diffperc,
}
)
except Exception as e:
# pass
_errors.append((_c0, _c1, e))
_DF_diff_out.append(_Dm)
# print(_c0, e)
DF_diff = pd.concat(_DF_diff_out).drop_duplicates()
if save_pkl == True:
DF_diff.to_pickle(_pklpath)
_logger.info(f"AST compare len({len(DF_diff)}) saved to:{_pklpath}")
return DF_diff
# DF_diff.groupby(['postAST_post','SampleID']).plot(x='E_RHE', y='EIS_Rct_O2_diff_abs',ylim=(-200,200))
def sense_DF_type(self, _DF):
# _c = [i[0] for i in Counter([i.split('_')[0] for i in _DF.columns]).most_common(5) if i[0] not in ['BET','tM']][0]
_excl = set(self.EC_idx_PorphSiO2.columns).union(SampleCodes.columns)
_res = [
i
for i in Counter(
["_".join(i.split("_")[0:2]) for i in _DF.columns]
).most_common(20)
if not any([i[0] in b for b in _excl]) and i[0][0].isalnum()
]
_res2 = Counter(["_".join(i.split("_")[0:1]) for i, c in _res])
_type = _res2.most_common(1)[0][0]
_extraC = Counter(
["_".join(i.split("_")[1:2]) for i in _DF.columns if _type in i]
).most_common(1)
if _extraC[0][1] > 4:
_type = f"{_type}_{_extraC[0][0]}"
# if _res2.most_common(2)[1][1] > 3:
# _type = f'{_type}_{_res2.most_common(2)[1][0]}'
return _type
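        # sense_DF_type guesses the parameter family ("ORR", "EIS", "N2", ...) from the most common
        # column-name prefix, excluding index and sample-code columns; the guess is only used for
        # file names and column prefixes.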
# EC_all_merged_lst.append(EC_OHN_merged)
# EC_all_merged = pd.concat(EC_all_merged_lst)
# ORR_cath = EC_PorphSiO2.ORR_updated_pars(sweep_type_select='cathodic')
    # ORR_an = EC_PorphSiO2.ORR_updated_pars(sweep_type_select='anodic')
# EC_OHN2 = pd.merge(template, pd.merge(ORR_an,pd.merge(HPRR, N2CV),on='SampleID'), on='SampleID')
# EC_OHN2_cath = pd.merge(template, pd.merge(ORR,pd.merge(HPRR, N2CV),on='SampleID'), on='SampleID')
# EC_OHN2.to_excel(FindExpFolder('PorphSiO2').compare.joinpath('EC_ORR_HPRR_N2.xlsx'))
def export_to_xls(EC_OHN_merged):
export_path = FindExpFolder("PorphSiO2").compare.joinpath(f"EC_pars_all.xlsx")
if "Sweep_Type" in EC_OHN_merged.columns:
with pd.ExcelWriter(export_path) as writer:
for swp, swpgr in EC_OHN_merged.groupby("Sweep_Type"):
swpgr.to_excel(writer, sheet_name=swp)
swpgr.to_excel(export_path.with_name(f"EC_pars_{swp}.xlsx"))
else:
export_path = FindExpFolder("PorphSiO2").compare.joinpath(
"EC_pars_no-sweep.xlsx"
)
EC_OHN_merged.to_excel(export_path)
print(f"EC pars saved to:\n{export_path}")
return export_path
def edit_columns(func, template=pd.concat([PorphSiO2_template(), SampleCodes])):
def wrapper(*args, **kwargs):
if kwargs:
pars_out, suffx = func(*args, **kwargs)
else:
pars_out, suffx = func(*args)
_skipcols = set(
EC_prepare_EC_merged.mcols
+ ["RPM_DAC_uni"]
+ list(PorphSiO2_template().columns)
+ list(EC_index.columns)
+ list(SampleCodes.columns)
)
cols = [
i
for i in pars_out.columns
if i not in _skipcols and not i.startswith(f"{suffx}")
]
pars_out = pars_out.rename(columns={i: f"{suffx}_" + i for i in cols})
return pars_out
return wrapper
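    # edit_columns prefixes every non-index column of the wrapped parser's output with the returned
    # suffix, e.g. the "E_onset" column from edit_pars_HPRR becomes "HPRR_E_onset", while shared
    # keys such as SampleID and Sweep_Type are left untouched.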
@edit_columns
def edit_pars_HPRR(sweep_type_select=["anodic", "cathodic"]):
hfs = []
for swp in sweep_type_select:
hprr_files = list(EC_PorphSiO2.folder.rglob(f"*{swp}*HPRR*disk*"))
# print(hprr_files)
for hf in hprr_files:
hprr_raw = pd.read_excel(hf)
hprr_raw["file"] = hf.stem
E_App_col = [i for i in hprr_raw.columns if "E_APP" in i.upper()][0]
E_jmin = hprr_raw.iloc[np.abs(hprr_raw["jmAcm-2"]).idxmin()][E_App_col]
sID = GetSampleID.try_find_sampleID(hf)[0]
fit_lin_fit = linregress(hprr_raw[E_App_col], hprr_raw["HPRR_j0_Fit"])
hfs.append(
{
"SampleID": sID,
"E_onset": E_jmin,
"dj/dE": fit_lin_fit[0],
"Sweep_Type": swp,
}
)
HPRR_pars_origin = pd.DataFrame(hfs)
return HPRR_pars_origin, "HPRR"
def load_pars_HER(self):
HER_pars_all = Load_from_Indexes.HER_pars_OVV(**self.reload_pars_kws)
self.pars_HER = HER_pars_all
@edit_columns
def edit_pars_HER(self, sweep_type_select=["anodic", "cathodic"], unit="F"):
# reload= False, use_daily = True, extra_plotting=False, xls_out = False
# LC_idx = self.index
if (
not Pfolder.joinpath("HER_orig_data.pkl").exists()
or self.reload_pars == True
):
self.load_pars_HER()
HER_pars = self.pars_HER.loc[
(
(self.pars_HER._type == "HER_pars")
                    & (self.pars_HER.PAR_file.isin(self.EC_idx_PorphSiO2.PAR_file.to_numpy()))
)
]
HER_pars.to_pickle(Pfolder.joinpath("HER_orig_data.pkl"))
else:
HER_pars = pd.read_pickle(Pfolder.joinpath("HER_orig_data.pkl"))
HER_pars = HER_pars.dropna(how="all", axis=1)
return HER_pars, "HER"
def load_pars_ORR(self):
ORR_pars_all = self.ParsColl["ORR_pars"]
# Load_from_Indexes.ORR_pars_OVV(**self.reload_pars_kws)
self.pars_ORR = ORR_pars_all
@edit_columns
def edit_pars_ORR(self):
if not hasattr(self, "pars_ORR"):
self.load_pars_ORR()
ORR_pars = self.pars_ORR.loc[
(
(self.pars_ORR.source_type == "ORR_pars")
& (
self.pars_ORR.PAR_file.isin(
self.EC_idx_PorphSiO2.PAR_file.to_numpy()
)
)
)
]
ORR_pars = ORR_pars.dropna(how="all", axis=1)
# Adding log cols to ORR pars
ORR_pars = ORR_pars.assign(
**{
f'{"_".join(i.split("_")[0:-1])}_log_{i.split("_")[-1]}': np.log(
ORR_pars[i]
)
for i in ORR_pars.columns
if "Jkin" in i
}
)
return ORR_pars, "ORR"
@edit_columns
def edit_pars_KL(self):
if not hasattr(self, "pars_ORR"):
self.load_pars_ORR()
KL_pars = self.pars_ORR.loc[
(
(self.pars_ORR.source_type == "KL_pars")
& (
self.pars_ORR.PAR_file.isin(
self.EC_idx_PorphSiO2.PAR_file.to_numpy()
)
)
)
]
KL_pars = KL_pars.dropna(how="all", axis=1)
return KL_pars, "ORR"
def load_pars_N2CV(self):
# N2_loadpars = N2_LoadPars(reload = True, reload_raw = False )
Cdl_pars_all = self.ParsColl["N2_pars"]
# N2_loadpars.N2_pars
# Load_from_Indexes.N2_pars_OVV(**self.reload_pars_kws)
# (reload= self.reload_pars, use_daily = use_daily, extra_plotting=extra_plotting, xls_out = xls_out)
self.pars_N2CV = Cdl_pars_all
@edit_columns
def edit_pars_N2cv(
self,
sweep_type_select=["anodic", "cathodic"],
unit="F",
reload=False,
use_daily=True,
extra_plotting=False,
xls_out=False,
):
self.load_pars_N2CV()
if not Pfolder.joinpath("N2_orig_data.pkl").exists() or reload == True:
Cdl_pars_all = self.pars_N2CV
Cdl_pars = Cdl_pars_all.loc[
Cdl_pars_all.PAR_file.isin(self.EC_idx_PorphSiO2.PAR_file.to_numpy())
]
# IndexOVV_N2_pars_fn = FindExpFolder('VERSASTAT').PostDir.joinpath('N2Cdl_pars_IndexOVV_v{0}.pkl.compress'.format(FileOperations.version))
Cdl_pars = Cdl_pars.assign(**{"E_RHE_mV": 1000 * Cdl_pars.E_RHE.to_numpy()})
# Cdl_pars.index = pd.MultiIndex.from_frame(Cdl_pars[['PAR_file','Sweep_Type_N2']])
# N2_files, N2fs = list(EC_PorphSiO2.folder.rglob('*CVs*xlsx')), []
N2fs = []
if unit == "mF":
unit_factor = 1
elif unit == "F":
unit_factor = 1e-3
else:
unit_factor = 1
for n2f, ngr in Cdl_pars.groupby("PAR_file"):
idx_cols = [i for i in ngr.columns if ngr[i].nunique() == 1]
_dc = [i for i in ngr.columns if ngr[i].nunique() > 1]
# sID = GetSampleID.try_find_sampleID(n2f)[0]
ngr.index = pd.MultiIndex.from_frame(ngr[idx_cols])
ngr.drop(columns=idx_cols, inplace=True)
ngr = ngr.dropna(axis=1, how="all")
for swp, swgrp in ngr.groupby("Sweep_Type_N2"):
if swp in sweep_type_select:
# anod = n2_raw.get(swp)
swgrp_Ev = swgrp.loc[
(swgrp.E_RHE_mV.isin(np.arange(0.0, 1000.0, 100)))
& (swgrp.Cdl_R > 0.8)
]
_mgr = []
for n, gr in swgrp_Ev.groupby("E_RHE_mV"):
if len(gr) > 1:
_mean = pd.DataFrame(pd.DataFrame(gr.mean(axis=0)).T)
_mean.index = gr.take([0]).index
_mgr.append(_mean)
else:
_mgr.append(gr)
_swgr_Ev_mean = pd.concat(_mgr)
_pvt = _swgr_Ev_mean.pipe(
multiIndex_pivot,
index=None,
columns=["E_RHE_mV"],
values="Cdl",
)
_pvt = _pvt.assign(**{"Sweep_Type": swp})
N2fs.append(_pvt)
else:
pass
N2_orig = pd.concat([i.reset_index() for i in N2fs], ignore_index=True)
N2_orig.columns = list(N2_orig.columns.get_level_values(0))
# N2_orig.index.names = N2fs[0].index.names
N2_orig = N2_orig.rename(
columns={
i: f"Cdl_{unit}cm-2_{int(i)}" for i in np.arange(0.0, 1000.0, 100)
}
)
N2_orig = N2_orig.assign(**{"RPM_DAC": 0})
N2_orig.to_pickle(Pfolder.joinpath("N2_orig_data.pkl"))
else:
N2_orig = pd.read_pickle(Pfolder.joinpath("N2_orig_data.pkl"))
# N2_orig = pd.DataFrame(N2fs) #.set_index('SampleID','Sweep_Type')
return N2_orig, "N2"
def load_pars_EIS(self):
_source = "Load pars"
if "files" in _source:
eis_files, eisfs = (
list(
self.folder.parent.joinpath(f"EIS_Porph_SiO2\{model_select}").rglob(
"JOS*.xlsx"
)
),
[],
)
if eis_files:
for ef in eis_files:
eis_raw = pd.read_excel(ef, index_col=[0])
eisfs.append(eis_raw)
EIS_pars_mod = pd.concat(eisfs, ignore_index=True).reset_index(
drop=True
)
else:
print("EIS pars file list empty!!")
else:
EIS_pars_mod = self.ParsColl["EIS_pars"]
# Load_from_Indexes.EIS_pars_OVV(reload= False, extra_plotting=False, xls_out = False, use_daily = True, use_latest=True)
# EIS_pars_mod = EIS_pars.loc[EIS_pars.Model_EEC.isin(self.EIS_models.values())]
self.pars_EIS = EIS_pars_mod
@edit_columns
def edit_pars_EIS(self, _source="Load pars"):
"""Models used are selected in the EIS_export module
via dict from EC_PorphSiO2.EIS_models"""
self.load_pars_EIS()
EIS_pars_mod = self.pars_EIS
EIS_pars_mod = EIS_pars_mod.loc[
EIS_pars_mod.index.isin(EIS_pars_mod.best_mod_index)
]
_sample_uniq_cols1 = set(
[
a
for n, gr in EIS_pars_mod.groupby("SampleID")
for a in [i for i in gr.columns if gr[i].nunique() == 1]
]
)
_sample_uniq_cols2 = set(
[
a
for n, gr in EIS_pars_mod.groupby("SampleID")
for a in [i for i in gr.columns if gr[i].nunique() == 2]
]
)
_sample_uniq_cols2.difference(_sample_uniq_cols1)
# Create EIS var columns with gas N2 or O2 as suffix names
# EPgrp = EIS_pars_mod.groupby(['Gas','Model_EEC'])
EPgrp_gas = EIS_pars_mod.groupby(["Gas"])
# N2grp = ('N2',self.EIS_models.get('N2'))
# O2grp = ('O2',self.EIS_models.get('O2'))
# EP_N2,EP_O2 = EPgrp.get_group(N2grp).drop(columns='Gas'), EPgrp.get_group(O2grp).drop(columns='Gas')
EC_exp_index = [
i for i in Load_from_Indexes.EC_label_cols if i not in ["PAR_file", "Gas"]
] + ["PAR_date_day"]
_gasgrp = []
for gas in ["O2", "N2"]:
# gasn_grp = (gas,self.EIS_models.get(gas))
grp = EPgrp_gas.get_group(gas)
_varsgrp = [a for i in grp.lmfit_var_names.unique() for a in i.split(", ")]
_varsgrp += ["Rct_kin" for i in _varsgrp if "Rct" in i] + [
"Qad+Cdlp"
for i in _varsgrp
if all([i in _varsgrp for i in ["Qad", "Cdlp"]])
]
_sample_uniq_cols1 = set([i for i in grp.columns if grp[i].nunique() == 1])
# grp.lmfit_var_names.unique()[0].split(', ')
_grp = grp.rename(columns={i: i + f"_{gas}" for i in set(_varsgrp)})
# _grp = _grp.drop(columns='Gas')
# _grp.set_index(EC_exp_index+[ i for i in list(_sample_uniq_cols) if i not in _varsgrp],inplace=True)
# _grp = _grp.drop(columns=EC_exp_index+[ i for i in list(_sample_uniq_cols) if i not in _varsgrp])
# [i for i in Load_from_Indexes.EC_label_cols if i is not 'Gas']
_gasgrp.append(_grp)
# _ggidx = [i.set_index(EC_exp_index) for i in _gasgrp]
# pd.concat(_ggidx,axis=0)
# _dups = [[(count,item) for item, count in collections.Counter(i.index.values).items() if count > 1] for i in _ggidx]
# _DUP_PARFILES = pd.concat(_ggidx).loc[[a[1] for i in _dups for a in i]].sort_values('PAR_date').PAR_file.unique()
# pd.merge(_gasgrp[0],_gasgrp[1], on =EC_exp_index+[ i for i in list(_sample_uniq_cols) if i not in _varsgrp])
# pd.merge(*_ggidx,left_index=True, right_index=True)
EIS_N2O2 = pd.concat(_gasgrp, ignore_index=True)
# EIS_N2O2 = pd.merge(EP_N2,EP_O2, suffixes=['_N2','_O2'],on='SampleID')
Rsis = [
i
for i in EIS_N2O2.columns
if "Rs" in i and not any(c in i for c in ("stderr", "_kin_", "_setting"))
]
Rct_cols = [
i
for i in EIS_N2O2.columns
if "Rct" in i and not any(c in i for c in ("stderr", "_kin_"))
]
# EIS_pars_origin[Rsis] = EIS_pars_origin[Rsis].mask(EIS_pars_origin[Rsis] < 1)
EIS_N2O2[Rsis] = EIS_N2O2[Rsis].mask(EIS_N2O2[Rsis] < 1)
print("EIS Rs mask applied")
EIS_N2O2[Rct_cols] = EIS_N2O2[Rct_cols].mask(EIS_N2O2[Rct_cols] > 1e5)
print("EIS Rct mask applied")
EIS_N2O2 = EIS_N2O2.dropna(axis=1, how="all")
# RedChiSq_limit = ORReis_merge.query('Rs > 1').RedChisqr.mean()+ 1*ORReis_merge.query('Rs > 1').RedChisqr.std()
# ORReis_neat = ORReis_merge.query('RedChisqr < @RedChiSq_limit & Rs > 2 & Rct < 9E05')
EIS_N2O2_an, EIS_N2O2_cat = EIS_N2O2.copy(), EIS_N2O2.copy()
EIS_N2O2_an["Sweep_Type"] = "anodic"
EIS_N2O2_cat["Sweep_Type"] = "cathodic"
EIS_N2O2_new = pd.concat([EIS_N2O2_an, EIS_N2O2_cat], axis=0)
# EIS_pars_orig_mod = EIS_pars_origin.query('Model_EEC == @model_select')
return EIS_N2O2_new, "EIS"
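        # N2 and O2 fit parameters end up side by side because the lmfit variable columns are
        # suffixed with the gas label before concatenation; Rs < 1 Ohm and Rct > 1e5 Ohm values
        # are masked beforehand as unphysical.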
def EIS_spectra_origin_prep(model_select=["Model(R0-L0-p(R1-Ws1,CPE1)-C2)"]):
eis_metaf, _specs = (
list(
EC_PorphSiO2.folder.parent.rglob(
"EIS_Porph_SiO2\meta_data*EIS*origin.xlsx"
)
),
[],
)
        EISmeta = pd.read_excel(eis_metaf[0], index_col=[0])
from queue import PriorityQueue
import numpy as np
import pandas as pd
import scipy.sparse as ss
import math
class FeatureExtractor(object):
def __init__(self, input_set, fragment_grouping_tol, loss_grouping_tol,
loss_threshold_min_count, loss_threshold_max_val,loss_threshold_min_val,
input_type):
self.all_ms1 = []
self.all_ms2 = []
self.all_counts = []
self.all_doc_labels = []
self.vocab = []
self.vocab_pos = {}
self.fragment_grouping_tol = fragment_grouping_tol
self.loss_grouping_tol = loss_grouping_tol
self.loss_threshold_min_count = loss_threshold_min_count
self.loss_threshold_max_val = loss_threshold_max_val
self.loss_threshold_min_val = loss_threshold_min_val
# load all the ms1 and ms2 files
self.F = len(input_set)
if input_type == 'filename': # load from csv file
for ms1_filename, ms2_filename in input_set:
print("Loading %s" % ms1_filename)
ms1 = pd.read_csv(ms1_filename, index_col=0)
self.all_ms1.append(ms1)
print("Loading %s" % ms2_filename)
                ms2 = pd.read_csv(ms2_filename, index_col=0)
                self.all_ms2.append(ms2)
"""
This is an upgraded version of Ceshine's LGBM starter script, simply adding more
average features and weekly average features on it.
"""
from datetime import date, timedelta
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
import pickle
print("Loading pickles...")
df_2017 = pickle.load(open('../input/processed/df_2017.pickle', 'rb'))
promo_2017 = pickle.load(open('../input/processed/promo_2017.pickle', 'rb'))
items = pickle.load(open('../input/processed/items.pickle', 'rb'))
df_test = pickle.load(open('../input/processed/df_test.pickle', 'rb'))
stores_items = pd.DataFrame(index=df_2017.index)
test_ids = df_test[['id']]
items = items.reindex(stores_items.index.get_level_values(1))
items_class = pd.get_dummies(items["class"], prefix="class", drop_first=True)
items_class.reset_index(drop=True, inplace=True)
def get_timespan(df, dt, minus, periods, freq='D'):
date_index = [c for c in pd.date_range(dt - timedelta(days=minus), periods=periods, freq=freq)
if c in df.columns]
return df[date_index]
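# get_timespan slices the wide (store/item x date) frame by calendar window, e.g.
# get_timespan(df_2017, date(2017, 5, 31), 7, 7) returns the 7 daily columns ending 2017-05-30,
# while freq="7D" picks one column per week for the day-of-week features below.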
def prepare_dataset(t2017, is_train=True):
X = pd.DataFrame({
"day_1_2017": get_timespan(df_2017, t2017, 1, 1).values.ravel(),
"mean_3_2017": get_timespan(df_2017, t2017, 3, 3).mean(axis=1).values,
"mean_7_2017": get_timespan(df_2017, t2017, 7, 7).mean(axis=1).values,
"mean_14_2017": get_timespan(df_2017, t2017, 14, 14).mean(axis=1).values,
"mean_30_2017": get_timespan(df_2017, t2017, 30, 30).mean(axis=1).values,
"mean_60_2017": get_timespan(df_2017, t2017, 60, 60).mean(axis=1).values,
"mean_140_2017": get_timespan(df_2017, t2017, 140, 140).mean(axis=1).values,
"promo_14_2017": get_timespan(promo_2017, t2017, 14, 14).sum(axis=1).values,
"promo_60_2017": get_timespan(promo_2017, t2017, 60, 60).sum(axis=1).values,
"promo_140_2017": get_timespan(promo_2017, t2017, 140, 140).sum(axis=1).values,
"mean_365_2017": get_timespan(df_2017, t2017, 365 - 8, 16).mean(axis=1).values, # yearly trend
})
for i in range(7):
X['mean_4_dow{}_2017'.format(i)] = get_timespan(df_2017, t2017, 28-i, 4, freq='7D').mean(axis=1).values
X['mean_20_dow{}_2017'.format(i)] = get_timespan(df_2017, t2017, 140-i, 20, freq='7D').mean(axis=1).values
for i in range(16):
X["promo_{}".format(i)] = promo_2017[
t2017 + timedelta(days=i)].values.astype(np.uint8)
#X = pd.concat([X, items_class], axis=1)
if is_train:
y = df_2017[
pd.date_range(t2017, periods=16)
].values
return X, y
return X
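# prepare_dataset builds one design matrix per forecast origin date: rolling sales/promo
# aggregates before t2017 as features and, for training, the following 16 days as targets.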
print("Preparing dataset...")
t2017 = date(2017, 5, 31)
X_l, y_l = [], []
for i in range(6):
delta = timedelta(days=7 * i)
X_tmp, y_tmp = prepare_dataset(
t2017 + delta
)
X_l.append(X_tmp)
y_l.append(y_tmp)
X_train = pd.concat(X_l, axis=0)
y_train = np.concatenate(y_l, axis=0)
del X_l, y_l
X_val, y_val = prepare_dataset(date(2017, 7, 26))
X_test = prepare_dataset(date(2017, 8, 16), is_train=False)
print("Training and predicting models...")
params = {
'num_leaves': 30,
'objective': 'regression',
'min_data_in_leaf': 250,
'learning_rate': 0.02,
'feature_fraction': 0.8,
'bagging_fraction': 0.8,
'bagging_freq': 2,
'metric': 'l2',
'num_threads': 4
}
MAX_ROUNDS = 3000
val_pred = []
test_pred = []
cate_vars = []
for i in range(16):
print("=" * 50)
print("Step %d" % (i+1))
print("=" * 50)
dtrain = lgb.Dataset(
X_train, label=y_train[:, i],
categorical_feature=cate_vars,
        weight=pd.concat([items["perishable"]] * 6)
    )
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Methods for processing VERIFICATION data.
"""
import os
import re
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import pickle
import requests
from collections import OrderedDict
from mosx.MesoPy import Meso
from mosx.obs.methods import get_obs_hourly, reindex_hourly
from mosx.util import generate_dates, get_array, get_ghcn_stid
def get_cf6_files(config, station_id, num_files=1):
"""
After code by <NAME>
Retrieves CF6 climate verification data released by the NWS. Parameter num_files determines how many recent files
are downloaded.
:param station_id: station ID to obtain cf6 files for
"""
# Create directory if it does not exist
site_directory = config['SITE_ROOT']
# Construct the web url address. Check if a special 3-letter station ID is provided.
nws_url = 'http://forecast.weather.gov/product.php?site=NWS&issuedby=%s&product=CF6&format=TXT'
stid3 = station_id[1:].upper()
nws_url = nws_url % stid3
# Determine how many files (iterations of product) we want to fetch
if num_files == 1:
if config['verbose']:
print('get_cf6_files: retrieving latest CF6 file for %s' % station_id)
else:
if config['verbose']:
print('get_cf6_files: retrieving %s archived CF6 files for %s' % (num_files, station_id))
# Fetch files
for r in range(1, num_files + 1):
# Format the web address: goes through 'versions' on NWS site which correspond to increasingly older files
version = 'version=%d&glossary=0' % r
nws_site = '&'.join((nws_url, version))
response = requests.get(nws_site)
cf6_data = response.text
# Remove the header
try:
body_and_footer = cf6_data.split('CXUS')[1] # Mainland US
except IndexError:
try:
body_and_footer = cf6_data.split('CXHW')[1] # Hawaii
except IndexError:
body_and_footer = cf6_data.split('CXAK')[1] # Alaska
body_and_footer_lines = body_and_footer.splitlines()
if len(body_and_footer_lines) <= 2:
body_and_footer = cf6_data.split('000')[2]
# Remove the footer
body = body_and_footer.split('[REMARKS]')[0]
# Find the month and year of the file
current_year = re.search('YEAR: *(\d{4})', body).groups()[0]
try:
current_month = re.search('MONTH: *(\D{3,9})', body).groups()[0]
current_month = current_month.strip() # Gets rid of newlines and whitespace
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%B %Y')
except: # Some files have a different formatting, although this may be fixed now.
current_month = re.search('MONTH: *(\d{2})', body).groups()[0]
current_month = current_month.strip()
datestr = '%s %s' % (current_month, current_year)
file_date = datetime.strptime(datestr, '%m %Y')
# Write to a temporary file, check if output file exists, and if so, make sure the new one has more data
datestr = file_date.strftime('%Y%m')
filename = '%s/%s_%s.cli' % (site_directory, station_id.upper(), datestr)
temp_file = '%s/temp.cli' % site_directory
with open(temp_file, 'w') as out:
out.write(body)
def file_len(file_name):
with open(file_name) as f:
for i, l in enumerate(f):
pass
return i + 1
if os.path.isfile(filename):
old_file_len = file_len(filename)
new_file_len = file_len(temp_file)
if old_file_len < new_file_len:
if config['verbose']:
print('get_cf6_files: overwriting %s' % filename)
os.remove(filename)
os.rename(temp_file, filename)
else:
if config['verbose']:
print('get_cf6_files: %s already exists' % filename)
else:
if config['verbose']:
print('get_cf6_files: writing %s' % filename)
os.rename(temp_file, filename)
def _cf6(config, station_id):
"""
After code by <NAME>
This function is used internally only.
Generates verification values from climate CF6 files stored in SITE_ROOT. These files can be generated
externally by get_cf6_files.py. This function is not necessary if climo data from _climo is found, except for
recent values which may not be in the NCDC database yet.
:param config:
:param station_id: station ID to obtain cf6 files for
:return: dict: wind values from CF6 files
"""
if config['verbose']:
print('_cf6: searching for CF6 files in %s' % config['SITE_ROOT'])
allfiles = os.listdir(config['SITE_ROOT'])
filelist = [f for f in allfiles if f.startswith(station_id.upper()) and f.endswith('.cli')]
filelist.sort()
if len(filelist) == 0:
raise IOError('No CF6 files found.')
if config['verbose']:
print('_cf6: found %d CF6 files.' % len(filelist))
# Interpret CF6 files
if config['verbose']:
print('_cf6: reading CF6 files')
cf6_values = {}
for file in filelist:
year, month = re.search('(\d{4})(\d{2})', file).groups()
infile = open('%s/%s' % (config['SITE_ROOT'], file), 'r')
for line in infile:
matcher = re.compile(
'( \d|\d{2}) ( \d{2}|-\d{2}| \d| -\d|\d{3})')
if matcher.match(line):
# We've found an ob line!
lsp = line.split()
day = int(lsp[0])
curdt = datetime(int(year), int(month), day)
cf6_values[curdt] = {}
# Max temp
if lsp[1] == 'M':
cf6_values[curdt]['max_temp'] = -999.0
else:
cf6_values[curdt]['max_temp'] = float(lsp[1])
# Min temp
if lsp[2] == 'M':
cf6_values[curdt]['min_temp'] = 999.0
else:
cf6_values[curdt]['min_temp'] = float(lsp[2])
# Precipitation
if lsp[7] == 'M':
cf6_values[curdt]['precip'] = -999.0
elif lsp[7] == 'T':
cf6_values[curdt]['precip'] = 0
else:
cf6_values[curdt]['precip'] = float(lsp[7])
# Wind
if lsp[11] == 'M':
cf6_values[curdt]['wind'] = 0.0
else:
cf6_values[curdt]['wind'] = float(lsp[11]) * 0.868976
return cf6_values
def _climo(config, station_id, dates=None):
"""
Fetches climo data using ulmo package to retrieve NCDC archives.
:param config:
:param station_id: station ID to obtain cf6 files for
:param dates: list of datetime objects
:return: dict of high temp, low temp, max wind, and precipitation values
"""
import ulmo
if config['verbose']:
print('_climo: fetching data from NCDC (may take a while)...')
climo_dict = {}
ghcn_stid = get_ghcn_stid(config, station_id)
try:
D = ulmo.ncdc.ghcn_daily.get_data(ghcn_stid, as_dataframe=True, elements=['TMAX','TMIN','WSF2','PRCP'])
wind = D['WSF2']
use_wind = True
except KeyError: #no maximum wind data, perhaps because station is outside U.S.
D = ulmo.ncdc.ghcn_daily.get_data(ghcn_stid, as_dataframe=True, elements=['TMAX','TMIN','PRCP'])
use_wind = False
if dates is None:
dates = list(D['TMAX'].index.to_timestamp().to_pydatetime())
for date in dates:
try:
a = D['TMAX'].loc[date]
climo_dict[date] = {}
climo_dict[date]['max_temp'] = D['TMAX'].loc[date]['value']*0.18+32.0
climo_dict[date]['min_temp'] = D['TMIN'].loc[date]['value']*0.18+32.0
if use_wind:
climo_dict[date]['wind'] = D['WSF2'].loc[date]['value'] / 10. * 1.94384
climo_dict[date]['precip'] = D['PRCP'].loc[date]['value'] / 254.0
except KeyError: #missing data
if config['verbose']:
print('_climo: climo data missing for %s',date)
return climo_dict
def pop_rain(series):
"""
Converts a series of rain values into 0 or 1 depending on whether there is measurable rain
:param series:
:return:
"""
new_series = series.copy()
new_series[series >= 0.01] = 1.
new_series[series < 0.01] = 0.
return new_series
def categorical_rain(series):
"""
Converts a series of rain values into categorical precipitation quantities a la MOS.
:param series:
:return:
"""
new_series = series.copy()
for j in range(len(series)):
if series.iloc[j] < 0.01:
new_series.iloc[j] = 0.
elif series.iloc[j] < 0.10:
new_series.iloc[j] = 1.
elif series.iloc[j] < 0.25:
new_series.iloc[j] = 2.
elif series.iloc[j] < 0.50:
new_series.iloc[j] = 3.
elif series.iloc[j] < 1.00:
new_series.iloc[j] = 4.
elif series.iloc[j] < 2.00:
new_series.iloc[j] = 5.
elif series.iloc[j] >= 2.00:
new_series.iloc[j] = 6.
else: # missing, or something else that's strange
new_series.iloc[j] = 0.
return new_series
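# categorical_rain bins amounts into the 7 MOS-style categories: 0 (<0.01), 1 (0.01-0.09),
# 2 (0.10-0.24), 3 (0.25-0.49), 4 (0.50-0.99), 5 (1.00-1.99), 6 (>=2.00); missing values
# fall back to category 0.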
def verification(config, output_files=None, csv_files=None, use_cf6=True, use_climo=True, force_rain_quantity=False):
"""
Generates verification data from MesoWest and saves to a file, which is used to train the model and check test
results.
:param config:
:param output_files: str: output file path if just one station, or list of output file paths if multiple stations
:param csv_files: str: path to csv file containing observations if just one station, or list of paths to csv files if multiple stations
:param use_cf6: bool: if True, uses data from CF6 files (only for U.S. stations)
:param use_climo: bool: if True, uses data from NCDC climatology
:param force_rain_quantity: if True, returns the actual quantity of rain (rather than POP); useful for validation
files
:return:
"""
if config['multi_stations']: #Train on multiple stations
station_ids = config['station_id']
if len(station_ids) != len(output_files): #There has to be the same number of output files as station IDs, so raise error if not
raise ValueError("There must be the same number of output files as station IDs")
if len(station_ids) != len(csv_files): #There has to be the same number of output files as station IDs, so raise error if not
raise ValueError("There must be the same number of csv files as station IDs")
else:
station_ids = [config['station_id']]
if output_files is not None:
output_files = [output_files]
if csv_files is not None:
csv_files = [csv_files]
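    # Illustrative call (file paths are hypothetical):
    # verification(config, output_files='KSEA_verif.pkl', csv_files='KSEA_obs.csv', use_cf6=False)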
for i in range(len(station_ids)):
station_id = station_ids[i]
if output_files is None:
output_file = '%s/%s_verif.pkl' % (config['SITE_ROOT'], station_id)
else:
output_file = output_files[i]
if csv_files is None:
csv_file = '%s/%s_verif.csv' % (config['SITE_ROOT'], station_id)
else:
csv_file = csv_files[i]
dates = generate_dates(config)
api_dates = generate_dates(config, api=True, api_add_hour=config['forecast_hour_start'] + 24)
datename = 'date_time_minus_%d' % config['forecast_hour_start']
if config['verbose']:
print('verification: obtaining observations from csv file')
all_obspd = pd.read_csv(csv_file)
vars_request=['air_temp','precip_accum_one_hour', 'wind_speed', 'air_temp_low_6_hour', 'air_temp_high_6_hour','precip_accum_six_hour']
for var in vars_request[:]: #see if variable is available, and remove from vars_request list if not
try:
obspd = all_obspd[var]
if var == 'precip_accum_one_hour' and (sum(all_obspd['precip_accum_one_hour']) == 0 or np.isnan(sum(all_obspd['precip_accum_one_hour']))): #sometimes precip_accum_one_hour column exists even if there is no real data
vars_request.remove('precip_accum_one_hour')
except KeyError: #no such variable, so remove from vars_request list
vars_request.remove(var)
obspd = all_obspd[['date_time']+vars_request] #subset of data used as verification
obspd['date_time']=np.array([datetime.strptime(date, '%Y-%m-%d %H:%M:%S') for date in obspd['date_time'].values],dtype='datetime64[s]')
if config['verbose']:
print('verification: setting time back %d hours for daily statistics' % config['forecast_hour_start'])
dateobj = pd.to_datetime(obspd['date_time']) - timedelta(hours=config['forecast_hour_start'])
obspd['date_time'] = dateobj
obspd = obspd.rename(columns={'date_time': datename})
# Reformat data into hourly and daily
# Hourly
def hour(dates):
date = dates.iloc[0]
if type(date) == str: #if data is from csv file, date will be a string instead of a datetime object
#depending on which version of NumPy or pandas you use, the first or second statement will work
try:
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
except:
date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S+00:00')
return datetime(date.year, date.month, date.day, date.hour)
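        # hour() floors a group's first timestamp to the top of the hour and last() keeps the final
        # value; together with the aggregate dict below they control how raw obs collapse to hourly rows.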
def last(values):
return values.iloc[-1]
aggregate = {datename: hour}
if 'air_temp_high_6_hour' in vars_request and 'air_temp_low_6_hour' in vars_request:
aggregate['air_temp_high_6_hour'] = np.max
aggregate['air_temp_low_6_hour'] = np.min
aggregate['air_temp'] = {'air_temp_max': np.max, 'air_temp_min': np.min}
if 'precip_accum_six_hour' in vars_request:
aggregate['precip_accum_six_hour'] = np.max
aggregate['wind_speed'] = np.max
if 'precip_accum_one_hour' in vars_request:
aggregate['precip_accum_one_hour'] = np.max
if config['verbose']:
print('verification: grouping data by hour for hourly observations')
# Note that obs in hour H are reported at hour H, not H+1
        # group obs into one row per hour before aggregating
        obs_hourly = obspd.groupby([pd.DatetimeIndex(obspd[datename]).year,
                                    pd.DatetimeIndex(obspd[datename]).month,
                                    pd.DatetimeIndex(obspd[datename]).day,
                                    pd.DatetimeIndex(obspd[datename]).hour]).agg(aggregate)
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
        # don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
            idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
            # mirrors the DatetimeIndex case above: Period arithmetic is not supported
            with tm.assertRaises(TypeError):
                idx - p
            with tm.assertRaises(TypeError):
                p - idx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import math
import tensorflow as tf
from datetime import datetime
from tensorflow.keras.layers import Dense, LSTM, Concatenate, Input, Embedding, Reshape, StackedRNNCells, LSTMCell, Dropout, Lambda
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras.optimizers import RMSprop, Adam
from tensorflow.keras.losses import CategoricalCrossentropy, SparseCategoricalCrossentropy
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from tensorflow.keras.metrics import CategoricalCrossentropy as CategoricalCrossentropyMetric  # aliased so it does not shadow the loss of the same name imported above (assumed intent)
from tensorflow.keras.activations import relu
from tensorflow.keras.layers.experimental.preprocessing import Normalization
from sklearn.preprocessing import OneHotEncoder
dados = pd.read_csv("train_set.csv")
import sys
import pandas
FILENAME = sys.argv[1] #"20220121 Overview CG plates and compounds _consolidated RTG.xlsx"
dfs_per_sheetname = pandas.read_excel(FILENAME, sheet_name=None)
assert "experiments" in dfs_per_sheetname
df = dfs_per_sheetname["experiments"]
assert "experiment ID" in df.columns
assert "compound map see corresponding excel table" in df.columns
assert df["experiment ID"].is_unique
## non-limited list
assert "imaging campaigns" in dfs_per_sheetname
df = dfs_per_sheetname["imaging campaigns"]
assert "imaging campaign ID" in df.columns
assert "experiment ID" in df.columns
assert "timepoint in hours" in df.columns
assert "raw data available in zip file" in df.columns
assert "processed images available in folder" in df.columns
assert "cq1 analysis available in folder" in df.columns
assert "incucyte analyzed data available in csv file" in df.columns
##
assert df["imaging campaign ID"].is_unique
#assert "incucyte timestamp" in df.columns
assert "compounds" in dfs_per_sheetname
df = dfs_per_sheetname["compounds"]
assert "compound ID" in df.columns
assert "SMILES" in df.columns
df2 = df[df.duplicated(subset=["SMILES"], keep=False)]
if len(df2) > 0:
print("Sheet 'compounds': The following groups of entries have the same SMILES but different compound IDs:")
for g, s in df2.groupby("SMILES"):
print(f"{g} : ")
print(s)
print("---")
df2 = df[df.duplicated(subset=["compound ID"], keep=False)]
if len(df2) > 0:
print("Sheet 'compounds': The following groups of entries have the same compound ID but different SMILES:")
for g, s in df2.groupby("compound ID"):
print(f"{g} : ")
print(s)
print("---")
assert df["compound ID"].is_unique
assert df["SMILES"].is_unique
assert not df["SMILES"].str.contains("\n").any()
assert "compound batches" in dfs_per_sheetname
df = dfs_per_sheetname["compound batches"]
assert "compound batch ID" in df.columns
assert "compound ID" in df.columns
df2 = df[df.duplicated(subset=["compound batch ID"], keep=False)]
if len(df2) > 0:
print("Sheet 'compound batches': The following groups of entries have the same compound batch ID:")
for g, s in df2.groupby("compound batch ID"):
print(f"{g} : ")
print(s)
print("---")
assert df["compound batch ID"].is_unique
mapping_tables_to_check = list( [s for s in dfs_per_sheetname if "compound map" in s] )
for mapping_table_name in mapping_tables_to_check:
assert mapping_table_name in dfs_per_sheetname
df = dfs_per_sheetname[mapping_table_name]
assert "well ID" in df.columns
assert "well name" in df.columns
assert "compound batch ID" in df.columns
assert "concentration uM" in df.columns
assert "experimental type" in df.columns
## complex tests follow...
acceptable_experimental_types = ["chemogenomic candidate", "unrelated to this experiment", "blank", "control", "cells only"]
for mapping_table_name in mapping_tables_to_check:
df = dfs_per_sheetname[mapping_table_name]
## check that all rows contain one of the allowed values above
assert df["experimental type"].isin(acceptable_experimental_types).all()
# concentration should be only nan if experimental type is one of the below
cond1 = df["experimental type"] == "blank"
cond2 = df["concentration uM"].isna()
cond3 = df["experimental type"] == "cells only"
cond3b = df["experimental type"] == "unrelated to this experiment"
assert df[cond1].equals(df[(cond1) & (cond2)])
assert df[cond3].equals(df[(cond3) & (cond2)])
assert df[cond3b].equals(df[(cond3b) & (cond2)])
assert df[cond2].equals(df[(cond1) | (cond3) | (cond3b)])
# concentration should be >0 if experimental type is different than the ones above
df_out = df[~((cond1)|(cond3)|(cond3b))].query("not `concentration uM` > 0")
if len( df_out ) > 0:
print(f"Concentrations in table '{mapping_table_name}' are not in the expected range:")
print(df_out)
print("---")
# compound batch should be only nan if experimental type is one of the above
cond4 = df["compound batch ID"].isna()
assert df[cond1].equals(df[(cond4) & (cond1)])
assert df[cond3].equals(df[(cond4) & (cond3)])
assert df[cond3b].equals(df[(cond4) & (cond3b)])
assert df[cond4].equals(df[(cond1) | (cond3) | (cond3b)])
## ID reference tests
foo = dfs_per_sheetname["experiments"]["experiment ID"]
bar = dfs_per_sheetname["imaging campaigns"]["experiment ID"]
assert foo.isin(bar.values).all()
assert bar.isin(foo.values).all()
foo = dfs_per_sheetname["compound batches"]["compound ID"]
bar = dfs_per_sheetname["compounds"]["compound ID"]
bar_foo = set(bar) - set(foo)
if len(bar_foo) > 0:
print("INFO: There are compound IDs in table 'compounds', which are not referenced in table 'compound batches':")
print(bar_foo)
print("---")
foo_bar = set(foo) - set(bar)
if len(foo_bar) > 0:
print("There are compound IDs in table 'compound batches', which cannot be resolved from table 'compounds':")
print(foo_bar)
print("---")
assert foo.isin(bar.values).all()
assert bar.isin(foo.values).all()
for mapping_table_name in mapping_tables_to_check:
foo = dfs_per_sheetname["compound batches"]["compound batch ID"].unique()
bar = dfs_per_sheetname[mapping_table_name]
bar = bar[ bar["experimental type"] != "cells only" ]
bar = bar[ bar["experimental type"] != "blank"]
bar = bar[ bar["experimental type"] != "unrelated to this experiment"]
bar = bar["compound batch ID"].unique()
bar_foo = set(bar) - set(foo)
if len(bar_foo) > 0:
print(f"There are compound batches in table '{mapping_table_name}', which cannot be resolved from table 'compound batches':")
print(bar_foo)
print("---")
print("Done.")
## BLOCK to replace dummy values in the whole excel file
if True:
did_i_change_anything = False
mapping = {
# "old" : "new",
"dummy1" : "dummy1",
"dummy2" : "EUB0001080a",
"dummy3" : "DP000007a",
"dummy4" : "EUB0001108a",
"EUB0000500a" : "EUB0000871a",
"EUB0000528a" : "EUB0000841a",
"EUB0000543aCl" : "EUB0000213bCl",
"EUB0000550aCl" : "EUB0000196bCl",
"EUB0000657aPO4" : "EUB0000140bPO4",
"EUB0000667aCit" : "EUB0000286bCit",
"EUB0000675aCl" : "EUB0000130bCl",
"EUB0000092a" : "EUB0000092b"
}
import openpyxl
wb = openpyxl.load_workbook(FILENAME)
for sheetname in wb.sheetnames:
ws = wb[sheetname]
dimension = ws.calculate_dimension()
for row in ws[dimension]:
for cell in row:
if cell.value in mapping:
print(f"Changing cell {cell} from value {cell.value} to {mapping[cell.value]}")
cell.value = mapping[cell.value]
did_i_change_anything = True
if did_i_change_anything:
wb.save(FILENAME + ".changed.xlsx")
## ... end of BLOCK.
## BLOCK to check the whole excel file for trailing spaces in the fields
if True:
import openpyxl
wb = openpyxl.load_workbook(FILENAME)
for sheetname in wb.sheetnames:
ws = wb[sheetname]
dimension = ws.calculate_dimension()
for row in ws[dimension]:
for cell in row:
if type(cell.value) == str and cell.value.strip() != cell.value:
print(f"Sheet '{sheetname}', cell {cell.coordinate} contains undesired whitespace: '{cell.value}'")
## ... end of BLOCK.
## BLOCK to condense a list of superfluous entries in table 'compounds' vs correct table 'compound batches'
if False:
foo = dfs_per_sheetname["compound batches"]["compound ID"]
bar = dfs_per_sheetname["compounds"]["compound ID"]
bar_foo = set(bar) - set(foo)
dfs_per_sheetname["compounds"][~bar.isin(bar_foo)].to_excel("2022-02-03-new-compounds-sheet.xlsx")
## ... end of BLOCK.
## BLOCK to check for expected pattern in compound concentrations in one plate...
if False:
for mapping_table_name in mapping_tables_to_check:
foo = dfs_per_sheetname[mapping_table_name]
foo = foo[foo["experimental type"]=="chemogenomic candidate"]
print(mapping_table_name)
print("total len:",len(foo))
counter=0
for groupname, series in foo.groupby("eubopen ID"):
if len(series)!=2:
if len(series)==1:
if series["concentration uM"].item() == 10.0:
counter+=1
continue
print("potential ERROR:")
print(series)
else:
if sorted(series["concentration uM"].values) == [1.0, 10.0]:
counter+=2
else:
print("potential ERROR:")
print(series)
print("rather unsuspicious:", counter)
## ... end of BLOCK.
### BLOCK to check for consistency in data and produce condensed output, if EUbOPEN, SGC IDs, and compound names are given in the compound maps ...
if False:
collect_mappings_between_sgc_and_eubopen_id = {}
collect_mappings_between_compound_names_and_eubopen_id = {}
for mapping_table_name in mapping_tables_to_check:
spam = dfs_per_sheetname[mapping_table_name][["SGC ID", "eubopen ID"]].dropna().drop_duplicates()
spam = dfs_per_sheetname[mapping_table_name][["SGC ID", "eubopen ID"]].drop_duplicates()
same_sgc_different_eubopen = spam[spam.duplicated(subset="SGC ID", keep=False)]
same_eubopen_different_sgc = spam[spam.duplicated(subset="eubopen ID", keep=False)]
if len(same_eubopen_different_sgc)>0:
print(f"There are compound batches in table '{mapping_table_name}', which have different SGC IDs, but the same EUbOPEN ID:")
print(same_eubopen_different_sgc)
print("---")
if len(same_sgc_different_eubopen)>0:
print(f"There are compound batches in table '{mapping_table_name}', which have the same SGC ID, but different EUbOPEN IDs:")
print(same_sgc_different_eubopen)
print("---")
#assert len(same_sgc_different_eubopen) == 0
#assert len(same_eubopen_different_sgc) == 0
for sgc_id, s in spam.groupby("SGC ID"):
if sgc_id in collect_mappings_between_sgc_and_eubopen_id:
value = s["eubopen ID"].item()
                if value != collect_mappings_between_sgc_and_eubopen_id[sgc_id] and not (pandas.isna(value) and pandas.isna(collect_mappings_between_sgc_and_eubopen_id[sgc_id])):
                    # the source snippet breaks off here; a plausible continuation reports the conflicting mapping
                    print(f"Conflicting EUbOPEN IDs for SGC ID {sgc_id}: {collect_mappings_between_sgc_and_eubopen_id[sgc_id]} vs {value}")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 26 11:09:25 2018
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as st
import time
from sklearn import linear_model
from pylab import mpl
from scipy.optimize import fsolve
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from matplotlib.font_manager import _rebuild
_rebuild()
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
class CBond:
def __init__(self, Time, Price, Bond_Coupon, Number, path):
self.Time = Time
self.Price = Price
self.Bond_Coupon = Bond_Coupon
self.path = path
self.Number = Number
def RemainTime(self,T0,T,datatype):
        if datatype == 'int':
            return int(str(T-T0)[:-14])
        elif datatype == 'float':
            return float(str(T-T0)[:-14])/365
    def BondValue(self, T0=None):
        # valuation date defaults to 'now'; Resale() calls this with an explicit T0
        if T0 is None:
            T0 = self.Time['now']
        T = self.Bond_Coupon.index[-1]
FV = self.Price['facevalue']
R = self.Price['riskfree']
period = self.RemainTime(T0,T,'float')
coupon = self.Bond_Coupon[T0<self.Bond_Coupon.index]
bondvalue = FV*np.exp(-R*period)
for i in range(len(coupon)):
t = coupon.index[i]
p = self.RemainTime(T0,t,'float')
bondvalue = bondvalue + FV*coupon[i]*np.exp(-R*p)
return bondvalue
def BSM(self):
T0 = self.Time['now']
T = self.Bond_Coupon.index[-1]
S0 = self.Price['now']
FV = self.Price['facevalue']
K = self.Price['strike']
R = self.Price['riskfree']
sigma = self.Price['volatility']
period = self.RemainTime(T0,T,'float')
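        # Standard Black-Scholes d1/d2 for a European call struck at the conversion price K;
        # the call value is scaled by the conversion ratio FV/K so it is expressed per bond face value.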
d1 = (np.log(S0/K) + (R + 0.5*sigma**2) * period)/(sigma * np.sqrt(period))
d2 = (np.log(S0/K) + (R - 0.5*sigma**2) * period)/(sigma * np.sqrt(period))
Call = (S0 * st.norm.cdf(d1) - K * np.exp(-R*period) * st.norm.cdf(d2))*FV/K
return Call
def BSM_Model(self):
return self.BSM() + self.BondValue()
def MonteCarlo(self):
paths = self.path
sigma = self.Price['volatility']
R = self.Price['riskfree']
T0 = self.Time['now']
T = self.Bond_Coupon.index[-1]
period = self.RemainTime(T0,T,'int')
Price_paths = np.zeros((paths,period+1))
Price_paths[:,0]= self.Price['now']
dt = 1/365
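        # Daily time step; each step below applies the exact GBM update under the risk-neutral drift R:
        # S_t = S_{t-1} * exp((R - 0.5*sigma^2)*dt + sigma*sqrt(dt)*Z)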
np.random.seed(1111)
for t in range(1, period+1):
z = np.random.standard_normal(paths)
Price_paths[:,t] = Price_paths[:,t-1] * np.exp((R-0.5*sigma**2) * dt + sigma * np.sqrt(dt) * z)
return Price_paths
'''
path = MonteCarlo(Time,Price,Bond_Coupon,paths=5000)
plt.figure(figsize=(10,7))
plt.grid(True)
plt.xlabel('Time step')
plt.ylabel('index level')
for i in range(path.shape[1]):
plt.plot(path[i])
plt.show()
'''
def Resale(self,T0,S0):
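        # Solve (via fsolve) for the adjusted strike K at which the convertible's value on date T0
        # (call component per face value plus straight bond value) equals the resale price P_resale.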
T = self.Bond_Coupon.index[-1]
period = self.RemainTime(T0,T,'float')
FV = self.Price['facevalue']
P_resale = self.Price['resale']
sigma = self.Price['volatility']
R = self.Price['riskfree']
BV = self.BondValue(T0)
def okfine(K):
return (S0 * st.norm.cdf((np.log(S0/K) + (R + 0.5*sigma**2) * period)/(sigma * np.sqrt(period))) - K * np.exp(-R*period) * st.norm.cdf((np.log(S0/K) + (R - 0.5*sigma**2) * period)/(sigma * np.sqrt(period))))*FV/K+(BV-P_resale)
sol = fsolve(okfine,1)
return sol
def CouponValue(self,T0,T):
r = self.Price['riskfree']
FV = self.Price['facevalue']
if T>=self.Bond_Coupon.index[0]:
temp_Coupon = self.Bond_Coupon[self.Bond_Coupon.index <= T]
Coupon = temp_Coupon.values[-1]
else:
Coupon = 0
period = self.RemainTime(T0,T,'float')
        # redeemed at the bond face value plus the accrued coupon for the current period
        discounted_value = FV * (1 + Coupon) * np.exp(-r * period)
return discounted_value
    # multivariate polynomial regression (degree-2 polynomial features + ordinary least squares)
def PolyRegression(self,X,Y): #X,Y numpy array type
quadratic = PolynomialFeatures(degree=2)
X_train = quadratic.fit_transform(X.reshape(-1,1))
X_test = X_train
Y_train = Y.reshape(-1,1)
regressor = LinearRegression()
regressor.fit(X_train,Y_train)
Y_test = regressor.predict(X_test)
return Y_test
def LSM_Model(self):
R = self.Price['riskfree'] # risk free rate
now = self.Time['now']
FV = self.Price['facevalue']
coupon_end = self.Bond_Coupon[-1]
trig_resale = self.Price['resale_trigger'] # resale price trigger
trig_redeem = self.Price['redeem_trigger'] # redeem price trigger
        if pd.isna(trig_resale):  # no resale trigger given: use a sentinel that can never be hit
            trig_resale = -1000000
            #trig_resale_num = 1000000
        if pd.isna(trig_redeem):  # no redeem trigger given: sentinel value (assumed; the source snippet is truncated here)
            trig_redeem = 1000000
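    '''
    Usage sketch (illustrative only -- the dictionary keys follow the references in the
    methods above, but every concrete date, price and coupon value below is an assumption):
    Time = {'now': pd.Timestamp('2018-07-26')}
    Price = {'now': 10.5, 'strike': 11.0, 'facevalue': 100.0, 'riskfree': 0.03,
             'volatility': 0.25, 'resale': 103.0, 'resale_trigger': 7.7,
             'redeem_trigger': 14.3}
    Bond_Coupon = pd.Series([0.004, 0.006, 0.010, 0.015, 0.018, 0.020],
                            index=pd.date_range('2019-07-26', periods=6, freq='365D'))
    cb = CBond(Time, Price, Bond_Coupon, Number=1, path=5000)
    print(cb.BSM_Model())    # closed-form value: call component plus straight bond value
    paths = cb.MonteCarlo()  # simulated daily stock paths used by the LSM valuation
    '''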
from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
import pandas.util.testing as tm
class TestSeriesAlterAxes:
def test_setindex(self, string_series):
# wrong type
msg = (
r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed"
)
with pytest.raises(TypeError, match=msg):
string_series.index = None
# wrong length
msg = (
"Length mismatch: Expected axis has 30 elements, new"
" values have 29 elements"
)
with pytest.raises(ValueError, match=msg):
string_series.index = np.arange(len(string_series) - 1)
# works
string_series.index = np.arange(len(string_series))
assert isinstance(string_series.index, Index)
# Renaming
def test_rename(self, datetime_series):
ts = datetime_series
renamer = lambda x: x.strftime("%Y%m%d")
renamed = ts.rename(renamer)
assert renamed.index[0] == renamer(ts.index[0])
# dict
rename_dict = dict(zip(ts.index, renamed.index))
renamed2 = ts.rename(rename_dict)
tm.assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64")
renamed = s.rename({"b": "foo", "d": "bar"})
tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"]))
# index with name
renamer = Series(
np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64"
)
renamed = renamer.rename({})
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
s = Series(range(5), name="foo")
renamer = Series({1: 10, 2: 20})
result = s.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo")
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
s = Series(range(4), index=list("abcd"))
for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
result = s.rename(name)
assert result.name == name
tm.assert_numpy_array_equal(result.index.values, s.index.values)
assert s.name is None
def test_rename_set_name_inplace(self):
s = Series(range(3), index=list("abc"))
for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
s.rename(name, inplace=True)
assert s.name == name
exp = np.array(["a", "b", "c"], dtype=np.object_)
tm.assert_numpy_array_equal(s.index.values, exp)
def test_rename_axis_supported(self):
# Supporting axis for compatibility, detailed in GH-18589
s = Series(range(5))
s.rename({}, axis=0)
s.rename({}, axis="index")
with pytest.raises(ValueError, match="No axis named 5"):
s.rename({}, axis=5)
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name="bar")
for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]:
s.name = name
assert s.name == name
s2.name = name
assert s2.name == name
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name("foo")
assert s2.name == "foo"
assert s.name is None
assert s is not s2
def test_rename_inplace(self, datetime_series):
renamer = lambda x: x.strftime("%Y%m%d")
expected = renamer(datetime_series.index[0])
datetime_series.rename(renamer, inplace=True)
assert datetime_series.index[0] == expected
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(range(10))
s.index = idx
assert s.index.is_all_dates
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ["hash", "category"]
ser.name = "value"
df = ser.reset_index()
assert "value" in df
df = ser.reset_index(name="value2")
assert "value2" in df
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
tm.assert_series_equal(s, s2)
# level
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
)
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
assert len(rs.columns) == 2
rs = s.reset_index(level=[0, 2], drop=True)
tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
assert isinstance(rs, Series)
def test_reset_index_name(self):
s = Series([1, 2, 3], index=Index(range(3), name="x"))
assert s.reset_index().index.name is None
assert s.reset_index(drop=True).index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
s = df.set_index(["A", "B"])["C"]
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = s.reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C"]])
with pytest.raises(KeyError, match="Level E "):
s.reset_index(level=["A", "E"])
# With single-level Index
s = df.set_index("A")["B"]
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df[["A", "B"]])
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df[["A", "B"]])
result = s.reset_index(level=levels[0], drop=True)
tm.assert_series_equal(result, df["B"])
with pytest.raises(IndexError, match="Too many levels"):
s.reset_index(level=[0, 1, 2])
# Check that .reset_index([],drop=True) doesn't fail
result = Series(range(4)).reset_index([], drop=True)
expected = Series(range(4))
tm.assert_series_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
s = Series(range(2), name="A", dtype="int64")
series_result = s.reset_index()
assert isinstance(series_result.index, RangeIndex)
series_expected = DataFrame(
[[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2)
)
tm.assert_frame_equal(series_result, series_expected)
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
tm.assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(["L0", "L1", "L2"])
tm.assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = Series(np.arange(6), index=e_idx)
tm.assert_series_equal(result, expected)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
s = Series([i for i in range(len(mi))], index=mi)
result = s.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
result = s.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
result = s.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
with pytest.raises(TypeError, match="unexpected"):
s.rename_axis(columns="wrong")
def test_rename_axis_inplace(self, datetime_series):
# GH 15704
expected = datetime_series.rename_axis("foo")
result = datetime_series
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
def test_rename_axis_none(self, kwargs):
# GH 25034
index = Index(list("abc"), name="foo")
df = Series([1, 2, 3], index=index)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if kwargs else index
expected = Series([1, 2, 3], index=expected_index)
tm.assert_series_equal(result, expected)
def test_set_axis_inplace_axes(self, axis_series):
# GH14636
ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
expected = ser.copy()
expected.index = list("abcd")
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
result = ser.copy()
result.set_axis(list("abcd"), axis=axis_series, inplace=True)
tm.assert_series_equal(result, expected)
def test_set_axis_inplace(self):
# GH14636
s = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
expected = s.copy()
expected.index = list("abcd")
# inplace=False
result = s.set_axis(list("abcd"), axis=0, inplace=False)
tm.assert_series_equal(expected, result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = s.set_axis(list("abcd"), inplace=False)
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
from CSDGAN.classes.image.ImageDataset import ImageFolderWithPaths, ImageFolder
import utils.image_utils as iu
import utils.utils as uu
import CSDGAN.utils.constants as cs
from torch.utils import data
import torchvision
from sklearn.model_selection import train_test_split
import shutil
import torchvision.transforms as t
import pandas as pd
import os
def import_dataset(path, bs, shuffle, incl_paths):
"""
Image generator for a directory containing folders as label names (and images of that label within each of these label-named folders)
:param path: Path to parent directory
:param bs: Batch size
:param shuffle: Whether to shuffle the data order
:param incl_paths: Whether to use ImageFolderWithPaths or simply ImageFolder (the former returns the path to each image as a third item in the iterator).
:return: PyTorch DataLoader
"""
if incl_paths:
dataset = ImageFolderWithPaths(
root=path,
transform=torchvision.transforms.ToTensor()
)
else:
dataset = ImageFolder(
root=path,
transform=torchvision.transforms.ToTensor()
)
loader = data.DataLoader(
dataset,
batch_size=bs,
num_workers=0,
shuffle=shuffle
)
return loader
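# Example usage (illustrative sketch -- the directory path and batch size below are
# assumptions, not values taken from this project's configuration):
#   train_loader = import_dataset(path="data/processed/train", bs=64, shuffle=True, incl_paths=False)
#   for images, labels in train_loader:
#       pass  # images: float tensors scaled to [0, 1] by ToTensor(); labels: integer class indices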
def preprocess_imported_dataset(path, import_gen, splits=None, x_dim=None):
"""
    Preprocesses the entire image data set, cropping images and splitting them into train, validation, and test folders.
    Returns import information for future steps.
    1. Scan the data set and map it by label
    2. Split into train/val/test
    3. Encode labels for one-hot encoding
    4. Initialize directories
    5. Preprocess images and save them to the directory
    6. Delete the original images
:param path: Path where unprocessed images are located and where processed images will be saved
:param import_gen: PyTorch DataLoader with raw images
:param splits: Train/Validation/Test Splits
:param x_dim: Desired dimensions of image. If None, dimensions of first image are used.
:return: Tuple of label encoder, one hot encoder, and image dimensions
"""
if splits is None:
splits = cs.IMAGE_DEFAULT_TRAIN_VAL_TEST_SPLITS # Default
assert round(sum(splits), 5) == 1.0
assert len(splits) == 3
# Scan data set, create table mapping it out by label
dataset_map, labels = scan_image_dataset(path)
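    # Two-stage split: carve out the test set first, then split the remainder into train/val.
    # The val fraction is rescaled by 1 / (train + val) so the final proportions match `splits`.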
train_val_map, test_map = train_test_split(dataset_map, test_size=splits[2], shuffle=True, stratify=dataset_map['label'])
train_map, val_map = train_test_split(train_val_map, test_size=splits[1] / (splits[0]+splits[1]), stratify=train_val_map['label'])
train_map['split'], val_map['split'], test_map['split'] = 'train', 'val', 'test'
dataset_map = | pd.concat((train_map, val_map, test_map), axis=0) | pandas.concat |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,
bdate_range, date_range, _np_version_under1p7)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
ensure_clean)
import pandas.util.testing as tm
def _skip_if_numpy_not_friendly():
# not friendly for < 1.7
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_numeric_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))
self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))
self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10,'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))
self.assertEqual(ct('100'), np.timedelta64(100,'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))
self.assertEqual(ct('100ms'), np.timedelta64(100000000,'ns'))
self.assertEqual(ct('1000ms'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('-1s'), -np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('1s'), np.timedelta64(1000000000,'ns'))
self.assertEqual(ct('10s'), np.timedelta64(10000000000,'ns'))
self.assertEqual(ct('100s'), np.timedelta64(100000000000,'ns'))
self.assertEqual(ct('1000s'), np.timedelta64(1000000000000,'ns'))
self.assertEqual(ct('1d'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('-1d'), -conv(np.timedelta64(1,'D')))
self.assertEqual(ct('1D'), conv(np.timedelta64(1,'D')))
self.assertEqual(ct('10D'), conv(np.timedelta64(10,'D')))
self.assertEqual(ct('100D'), conv(np.timedelta64(100,'D')))
self.assertEqual(ct('1000D'), conv(np.timedelta64(1000,'D')))
self.assertEqual(ct('10000D'), conv(np.timedelta64(10000,'D')))
# space
self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000,'D')))
self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000,'D')))
# invalid
self.assertRaises(ValueError, ct, '1foo')
self.assertRaises(ValueError, ct, 'foo')
def test_full_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(ct('1days'), conv(d1))
self.assertEqual(ct('1days,'), conv(d1))
self.assertEqual(ct('- 1days,'), -conv(d1))
self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1,'s')))
self.assertEqual(ct('06:00:01'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms')))
self.assertEqual(ct('- 1days, 00:00:01'), -conv(d1+np.timedelta64(1,'s')))
self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s')))
self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms')))
# invalid
self.assertRaises(ValueError, ct, '- 1days, 00')
def test_nat_converters(self):
_skip_if_numpy_not_friendly()
self.assertEqual(to_timedelta('nat',box=False), tslib.iNaT)
self.assertEqual(to_timedelta('nan',box=False), tslib.iNaT)
def test_to_timedelta(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1,'D')
self.assertEqual(to_timedelta('1 days 06:05:01.00003',box=False), conv(d1+np.timedelta64(6*3600+5*60+1,'s')+np.timedelta64(30,'us')))
self.assertEqual(to_timedelta('15.5us',box=False), conv(np.timedelta64(15500,'ns')))
# empty string
result = to_timedelta('',box=False)
self.assertEqual(result, tslib.iNaT)
result = to_timedelta(['', ''])
self.assert_(isnull(result).all())
# pass thru
result = to_timedelta(np.array([np.timedelta64(1,'s')]))
expected = np.array([np.timedelta64(1,'s')])
tm.assert_almost_equal(result,expected)
# ints
result = np.timedelta64(0,'ns')
expected = to_timedelta(0,box=False)
self.assertEqual(result, expected)
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d','1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = Series([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ],dtype='m8[ns]')
expected = to_timedelta([0,10],unit='s')
tm.assert_series_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v,box=False)
expected = np.timedelta64(timedelta(seconds=1))
self.assertEqual(result, expected)
def test_to_timedelta_via_apply(self):
_skip_if_numpy_not_friendly()
# GH 5458
expected = Series([np.timedelta64(1,'s')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_timedelta_ops(self):
_skip_if_numpy_not_friendly()
# GH4984
# make sure ops return timedeltas
s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])
td = s.diff()
result = td.mean()[0]
# TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta(timedelta(seconds=9))
tm.assert_almost_equal(result, expected)
result = td.quantile(.1)
# This properly returned a scalar.
expected = to_timedelta('00:00:02.6')
tm.assert_almost_equal(result, expected)
result = td.median()[0]
# TODO This should have returned a scalar to begin with. Hack for now.
expected = to_timedelta('00:00:08')
tm.assert_almost_equal(result, expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()[0]
expected = | to_timedelta('00:01:21') | pandas.to_timedelta |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import warnings
# warnings.filterwarnings('ignore')
# In[2]:
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import sparse
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
import pickle
# # Amazon Employee Access Challenge
# In[3]:
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
# In[4]:
train.shape
# In[5]:
test.shape
# In[6]:
y_train = train['ACTION']
# In[7]:
y_train.shape
# In[8]:
train_data = train.drop('ACTION', axis=1)
train_data.shape
# In[9]:
test_data = test.drop('id', axis=1)
test_data.shape
# ## Common Variables
# In[10]:
# define variables
random_state = 42
cv = 5
scoring = 'roc_auc'
verbose=2
# ## Common functions
# In[11]:
def save_submission(predictions, filename):
'''
Save predictions into csv file
'''
global test
submission = pd.DataFrame()
submission["Id"] = test["id"]
submission["ACTION"] = predictions
filepath = "result/sampleSubmission_"+filename
submission.to_csv(filepath, index = False)
# In[12]:
def print_graph(results, param1, param2, xlabel, ylabel, title='Plot showing the ROC_AUC score for various hyper parameter values'):
'''
Plot the graph
'''
plt.plot(results[param1],results[param2]);
plt.grid();
plt.xlabel(xlabel);
plt.ylabel(ylabel);
plt.title(title);
# In[13]:
def get_rf_params():
'''
Return dictionary of parameters for random forest
'''
params = {
'n_estimators':[10,20,50,100,200,500,700,1000],
'max_depth':[1,2,5,10,12,15,20,25],
'max_features':[1,2,3,4,5],
'min_samples_split':[2,5,7,10,20]
}
return params
# In[14]:
def get_xgb_params():
'''
Return dictionary of parameters for xgboost
'''
params = {
'n_estimators': [10,20,50,100,200,500,750,1000],
'learning_rate': uniform(0.01, 0.6),
'subsample': uniform(),
'max_depth': [3, 4, 5, 6, 7, 8, 9],
'colsample_bytree': uniform(),
'min_child_weight': [1, 2, 3, 4]
}
return params
# ### We will try the following models
#
# 1. KNN
# 2. SVM
# 3. Logistic Regression
# 4. Random Forest
# 5. Xgboost
# ## Build Models on the raw data
# ## 1.1 KNN with raw features
# In[15]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[16]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[17]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[18]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[19]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_data,y_train)
# In[20]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, "knn_raw.csv")
# 
# ## 1.2 SVM with raw feature
# In[21]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[22]:
best_c=best_model.best_params_['C']
best_c
# In[23]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[24]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[25]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_data,y_train)
# In[26]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'svm_raw.csv')
# 
# ## 1.3 Logistic Regression with Raw Feature
# In[27]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[28]:
best_c=best_model.best_params_['C']
best_c
# In[29]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[30]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[31]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_data,y_train)
# In[32]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'lr_raw.csv')
# 
# ## 1.4 Random Forest with Raw Feature
# In[33]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_data,y_train)
# In[34]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[35]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[36]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_data,y_train)
# In[37]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# ## Feature Importance Observations:
#
# 1. MGR_ID is the most important feature, followed by RESOURCE and ROLE_DEPTNAME
# In[38]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'rf_raw.csv')
# 
# ## 1.5 Xgboost with Raw Feature
# In[39]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_data,y_train)
# In[40]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[41]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[42]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_data,y_train)
# In[43]:
features=train_data.columns
importance=model.feature_importances_
features=pd.DataFrame({'features':features,'value':importance})
features=features.sort_values('value',ascending=False)
sns.barplot('value','features',data=features);
plt.title('Feature Importance');
# In[44]:
predictions = model.predict_proba(test_data)[:,1]
save_submission(predictions, 'xgb_raw.csv')
# 
# 
# In[45]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','Raw', 0.67224, 0.68148])
x.add_row(['SVM', 'Raw', 0.50286, 0.51390])
x.add_row(['Logistic Regression', 'Raw', 0.53857, 0.53034])
x.add_row(['Random Forest', 'Raw', 0.87269, 0.87567])
x.add_row(['Xgboost', 'Raw', 0.86988, 0.87909])
print(x)
# # Observations:
#
# 1. Xgboost performs best on the raw features
# 2. Random forest also performs well on raw features
# 3. Tree-based models perform better than linear models on raw features
# ## Build model on one hot encoded features
# ### 2.1 KNN with one hot encoded features
# In[46]:
train_ohe = sparse.load_npz('data/train_ohe.npz')
test_ohe = sparse.load_npz('data/test_ohe.npz')
train_ohe.shape, test_ohe.shape, y_train.shape
# In[47]:
parameters={'n_neighbors':np.arange(1,100, 5)}
clf = RandomizedSearchCV(KNeighborsClassifier(n_jobs=-1),parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=4)
best_model = clf.fit(train_ohe,y_train)
# In[48]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_n_neighbors')
results
# In[49]:
print_graph(results, 'param_n_neighbors', 'mean_test_score', 'Hyperparameter - No. of neighbors', 'Test score')
# In[50]:
best_c=best_model.best_params_['n_neighbors']
best_c
# In[51]:
model = KNeighborsClassifier(n_neighbors=best_c,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[52]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, "knn_ohe.csv")
# 
# ## 2.2 SVM with one hot encoded features
# In[53]:
C_val = uniform(loc=0, scale=4)
model= LinearSVC(verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
parameters={'C':C_val}
clf = RandomizedSearchCV(model,parameters,random_state=random_state,cv=cv,verbose=verbose,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[54]:
best_c=best_model.best_params_['C']
best_c
# In[55]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[56]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[57]:
#https://stackoverflow.com/questions/26478000/converting-linearsvcs-decision-function-to-probabilities-scikit-learn-python
model = LinearSVC(C=best_c,verbose=verbose,random_state=random_state,class_weight='balanced',max_iter=2000)
model = CalibratedClassifierCV(model)
model.fit(train_ohe,y_train)
# In[58]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'svm_ohe.csv')
# 
# ## 2.3 Logistic Regression with one hot encoded features
# In[59]:
C_val = uniform(loc=0, scale=4)
lr= LogisticRegression(verbose=verbose,random_state=random_state,class_weight='balanced',solver='lbfgs',max_iter=500,n_jobs=-1)
parameters={'C':C_val}
clf = RandomizedSearchCV(lr,parameters,random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[60]:
best_c=best_model.best_params_['C']
best_c
# In[61]:
results = pd.DataFrame.from_dict(best_model.cv_results_)
results=results.sort_values('param_C')
results
# In[62]:
print_graph(results, 'param_C', 'mean_test_score', 'Hyperparameter - C', 'Test score')
# In[63]:
model = LogisticRegression(C=best_c,verbose=verbose,n_jobs=-1,random_state=random_state,class_weight='balanced',solver='lbfgs')
model.fit(train_ohe,y_train)
# In[64]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'lr_ohe.csv')
# 
# ## 2.4 Random Forest with one hot encoded features
# In[65]:
rfc = RandomForestClassifier(random_state=random_state,class_weight='balanced',n_jobs=-1)
clf = RandomizedSearchCV(rfc,get_rf_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model = clf.fit(train_ohe,y_train)
# In[66]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_rf_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[67]:
n_estimators=clf.best_params_['n_estimators']
max_features=clf.best_params_['max_features']
max_depth=clf.best_params_['max_depth']
min_samples_split=clf.best_params_['min_samples_split']
n_estimators,max_features,max_depth,min_samples_split
# In[68]:
model=RandomForestClassifier(n_estimators=n_estimators,max_depth=max_depth,max_features=max_features,
min_samples_split=min_samples_split,
random_state=random_state,class_weight='balanced',n_jobs=-1)
model.fit(train_ohe,y_train)
# In[69]:
# features=train_ohe.columns
# importance=model.feature_importances_
# features=pd.DataFrame({'features':features,'value':importance})
# features=features.sort_values('value',ascending=False)
# sns.barplot('value','features',data=features);
# plt.title('Feature Importance');
# In[70]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'rf_ohe.csv')
# 
# ## 2.5 Xgboost with one hot encoded features
# In[71]:
xgb = XGBClassifier()
clf = RandomizedSearchCV(xgb,get_xgb_params(),random_state=random_state,cv=cv,verbose=verbose,n_iter=100,scoring=scoring,n_jobs=-1)
best_model=clf.fit(train_ohe,y_train)
# In[72]:
results = pd.DataFrame(best_model.cv_results_)
results.sort_values('mean_test_score',ascending=False,inplace=True)
param_keys=['param_'+str(each) for each in get_xgb_params().keys()]
param_keys.append('mean_test_score')
results[param_keys].head(10)
# In[73]:
colsample_bytree = clf.best_params_['colsample_bytree']
learning_rate=clf.best_params_['learning_rate']
max_depth=clf.best_params_['max_depth']
min_child_weight=clf.best_params_['min_child_weight']
n_estimators=clf.best_params_['n_estimators']
subsample=clf.best_params_['subsample']
colsample_bytree,learning_rate,max_depth,min_child_weight,n_estimators,subsample
# In[74]:
model = XGBClassifier(colsample_bytree=colsample_bytree,learning_rate=learning_rate,max_depth=max_depth,
min_child_weight=min_child_weight,n_estimators=n_estimators,subsample=subsample,n_jobs=-1)
model.fit(train_ohe,y_train)
# In[75]:
# features=train_ohe.columns
# importance=model.feature_importances_
# features=pd.DataFrame({'features':features,'value':importance})
# features=features.sort_values('value',ascending=False)
# sns.barplot('value','features',data=features);
# plt.title('Feature Importance');
# In[76]:
predictions = model.predict_proba(test_ohe)[:,1]
save_submission(predictions, 'xgb_ohe.csv')
# 
# 
# In[77]:
from prettytable import PrettyTable
x = PrettyTable(['Model', 'Feature', 'Private Score', 'Public Score'])
x.add_row(['KNN','ohe', 0.81657, 0.81723])
x.add_row(['SVM', 'ohe', 0.87249, 0.87955])
x.add_row(['Logistic Regression', 'ohe', 0.87436, 0.88167])
x.add_row(['Random Forest', 'ohe', 0.84541, 0.84997])
x.add_row(['Xgboost', 'ohe', 0.84717, 0.85102])
print(x)
# # Observations:
#
# 1. One-hot encoded features perform better than the other encoding techniques
# 2. Linear models (Logistic Regression and SVM) perform better in higher dimensions
# # 3 Build Model on frequency encoding feature
# ## 3.1 KNN with frequency encoding
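# A frequency-encoded version of the data is loaded from CSV below. For reference, a minimal
# sketch of how such features are typically built is shown here (illustrative only -- the
# preprocessing that actually produced 'data/train_df_fc.csv' is not part of this notebook):
# train_df_fc = train_data.copy()
# for col in train_data.columns:
#     freq = train_data[col].value_counts(normalize=True)
#     train_df_fc[col] = train_data[col].map(freq)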
# In[78]:
train_df_fc = | pd.read_csv('data/train_df_fc.csv') | pandas.read_csv |
import os
from pprint import pprint as pp
import pandas as pd
import tweepy
from src.extract_data import clean_data, get_mongo_client
from shared.db.utils import insert_data
def main():
auth = tweepy.OAuthHandler(os.environ["API_KEY"], os.environ["API_KEY_SECRET"])
auth.set_access_token(os.environ["ACCESS_TOKEN"], os.environ["ACCESS_TOKEN_SECRET"])
api = tweepy.API(auth)
pp(os.environ)
client = get_mongo_client()
db = client["sncf"]
userID = "sncfisajoke"
df_db = pd.DataFrame(list(db.trains_statistics.find()))
df_db = df_db.drop(["_id"], axis=1)
df_db["date"] = pd.to_datetime(df_db["date"], format="%d-%m-%Y")
df_db = df_db.sort_values(by=["date"], ascending=[True])
latest_date_db = df_db["date"].values[-1]
tweets = api.user_timeline(screen_name=userID, count=15, include_rts=False, tweet_mode="extended")
df = | pd.DataFrame([info.full_text for info in tweets], columns=["Text"]) | pandas.DataFrame |
import tempfile
from datetime import datetime
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
import pytz
from eemeter.testing.mocks import MockWeatherClient
from eemeter.weather import ISDWeatherSource
from eemeter.modeling.formatters import ModelDataBillingFormatter
from eemeter.structures import EnergyTrace
@pytest.fixture
def mock_isd_weather_source():
tmp_url = "sqlite:///{}/weather_cache.db".format(tempfile.mkdtemp())
ws = ISDWeatherSource("722880", tmp_url)
ws.client = MockWeatherClient()
return ws
@pytest.fixture
def trace1():
data = {
"value": [1, 1, 1, 1, np.nan],
"estimated": [False, False, True, False, False]
}
columns = ["value", "estimated"]
index = [
datetime(2011, 1, 1, tzinfo=pytz.UTC),
datetime(2011, 2, 1, tzinfo=pytz.UTC),
datetime(2011, 3, 2, tzinfo=pytz.UTC),
datetime(2011, 4, 3, tzinfo=pytz.UTC),
datetime(2011, 4, 29, tzinfo=pytz.UTC),
]
df = pd.DataFrame(data, index=index, columns=columns)
return EnergyTrace("ELECTRICITY_CONSUMPTION_SUPPLIED", df, unit="KWH")
@pytest.fixture
def trace2():
data = {
"value": [np.nan],
"estimated": [True]
}
columns = ["value", "estimated"]
index = [
datetime(2011, 1, 1, tzinfo=pytz.UTC),
]
df = | pd.DataFrame(data, index=index, columns=columns) | pandas.DataFrame |
'''
File name: 00_generate_database.py
Author: <NAME>
Date created: 02/13/2018
Date last modified: 02/13/2018
Python Version: 2.7
Description: Script to create database for psychosis project
Project: Psychosis
'''
import pandas as pd
import numpy as np
import sys
import os
sys.path.append(os.environ.get("CODEDIR"))
pd.options.mode.chained_assignment = None # default='warn'
from prebids.databasing import psyrc, psydb, psy, exclude
# clean different tables
DB = pd.read_csv(os.environ.get("NIDBTABLE"))
DB = psydb.clean_DB(DB)
RC = pd.read_csv(os.environ.get("REDCAPTABLE"),low_memory=False)
RC = psyrc.clean_RC(RC)
cols = psyrc.redcap_instruments(RC)
# initiate big database
cols = psyrc.redcap_instruments(RC)
cols = [x for sublist in cols.values() for x in sublist]
cols += ['UID','in_redcap','in_nidb','exclude']
PSYDB = pd.DataFrame({x:[] if not x =='UID' else "" for x in cols})
# merge in redcap and nidb
PSYDB = psydb.nidb_merge(PSYDB,DB)
PSYDB.fillna(value={'in_nidb':False})
PSYDB = psyrc.redcap_merge(PSYDB,RC)
# deal with excluded subjects
EX = exclude.instantiate_exclusion_table(DB,RC)
EXDB = | pd.DataFrame(EX) | pandas.DataFrame |
import pandas as pd
import numpy as np
import geopandas as gpd
from pathlib import Path
"""
Script with utility functions for importing and creating the datasets
"""
## Paths for navigating the cookiecutter project structure
data_path_in = Path('./../data/raw')
data_path_out = Path('./../data/processed')
models_path = Path('./../models')
files = {'grid':['trentino-grid.geojson',"geojson"],
'adm_reg':['administrative_regions_Trentino.json',"json"],
'weather':['meteotrentino-weather-station-data.json',"json"],
'precip':['precipitation-trentino.csv',"csv"],
'precip-avail':['precipitation-trentino-data-availability.csv',"csv"],
'SET-1':['SET-nov-2013.csv',"csv"],
'SET-2':['SET-dec-2013.csv',"csv"],
'SET-lines':['line.csv',"csv"],
'twitter':['social-pulse-trentino.geojson',"json"],
'regions':['Com01012013/Com01012013_WGS84.shp',"shape"],
'circoscrizioni':['CircoscrizioniTN/circoscrizioni.shp',"shape"]}
def safe_import(inp):
"""
    Function that imports data from a file, turns it into a (geo)pandas dataframe,
    and prints the column names so the import can be checked for correctness.
    Intended to be used inside a notebook.
"""
filename=files[inp][0]
filetype=files[inp][1]
fl=data_path_in / filename
if(filetype=="geojson"):
out=gpd.read_file(fl)
if(filetype=="csv"):
out=pd.read_csv(fl)
if(filetype=="json"):
out=pd.read_json(fl, orient="values")
if(filetype=="shape"):
out=gpd.read_file(fl)
print("SafeImport_Output: ",out.keys())
return out
def appforth(df, line):
"""
    Function that adds a row at the top of a dataframe
"""
df.loc[-1]=line
df.index = df.index + 1 # shifting index
df = df.sort_index() # sorting by index
return df
def orderstation(weatherdf):
"""
    Function that processes the weather dataframe to extract each station's unique
    characteristics, such as name, position, and elevation.
    Handy when looking for the station closest to a given point.
"""
stazioni = weatherdf['station'].unique()
coltmpr=["station", "elevation", "geometry"]
station_stats=gpd.GeoDataFrame(columns=coltmpr)
for idx,stat in enumerate(stazioni):
temp = pd.DataFrame(weatherdf[weatherdf['station']==stat])
station_stats.loc[idx]="NaN"
station_stats.loc[idx]["geometry"]=temp.loc[temp.index[0],:]["geometry"]
station_stats.loc[idx]["station"]=stat
station_stats.loc[idx]["elevation"]=temp.loc[temp.index[0],:]["elevation"]
return station_stats
varconv={0 : "temperatures.", 1 : "precipitations."}
def find_Weather(weatherdf, month, day, hour, stationName, varType=0):
"""
    Function that looks up a value in the given weather dataframe for a given date and hour.
    The inputs are self-explanatory; varType=0 means temperature, varType=1 means precipitation.
"""
cellname="%02d%02d" % (int(np.floor(hour)),int((hour%1)*60))
cellname=varconv[varType]+cellname
df=weatherdf[weatherdf['station']==stationName]
df=df[ df['date']==("2013-%02d-%02d"%(month,day)) ]
    # If the value is missing there are two ways to proceed:
    # 1) take the reading half an hour earlier or later, which should not differ much (not trivial and unsafe)
    # 2) return NaN: the EDA will have a bit less statistics, but that is fine since the NaNs are few
    # This version uses option 2
if(df[cellname].isnull().all()):
return np.NAN
return float(df[cellname])
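# Example (illustrative -- the station name below is an assumption, not taken from the data):
#   t = find_Weather(weather_df, month=11, day=5, hour=14.5, stationName="T0135", varType=0)
#   looks up column "temperatures.1430" for station "T0135" on date "2013-11-05"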
def Wday(month, day):
"""
Function that yields the weekday given the month and the day
Works for the year 2013, months 11 and 12
"""
out=["Mo","Tu","We","Th","Fr","Sa","Su"]
if(month==11):
return out[(4+day)%7]
if(month==12):
return out[(6+day)%7]
def scale(v):
"""
    Function that shifts a vector, i.e. moves the i-th element to index i+1.
    Drops the last element of the sequence and puts -1000 in the first slot (convenient for our purposes).
    Returns the shifted vector.
"""
v.insert(0, -1000) #Insert front
del v[-1] #Pop back
return v
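# e.g. scale([5, 6, 7]) -> [-1000, 5, 6]; note that the list is modified in place and also returned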
def df_reg():
"""
    Function that combines the refined dataframes into a new dataframe suitable for machine learning.
    Returns the dataframe itself.
    Relies on the databases being in the processed folder, so be careful.
"""
dfTweets=pd.read_csv(data_path_out / "twitter_final.csv")
dfTemp= | pd.read_csv(data_path_out / "weather_final.csv") | pandas.read_csv |
# Copyright 2017-2021 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import pandas as pd
import numpy as np
import time
import requests
import json
import math
from moonshot.slippage import FixedSlippage
from moonshot.mixins import WeightAllocationMixin
from moonshot.cache import Cache
from moonshot.exceptions import MoonshotError, MoonshotParameterError
from quantrocket.price import get_prices
from quantrocket.master import list_calendar_statuses, download_master_file
from quantrocket.account import download_account_balances, download_exchange_rates
from quantrocket.blotter import list_positions, download_order_statuses
class Moonshot(
WeightAllocationMixin):
"""
Base class for Moonshot strategies.
To create a strategy, subclass this class. Implement your trading logic in the class
methods, and store your strategy parameters as class attributes.
Class attributes include built-in Moonshot parameters which you can override, as well
as your own custom parameters.
To run a backtest, at minimum you must implement `prices_to_signals`, but in general you will
want to implement the following methods (which are called in the order shown):
`prices_to_signals` -> `signals_to_target_weights` -> `target_weights_to_positions` -> `positions_to_gross_returns`
To trade (i.e. generate orders intended to be placed, but actually placed by other services
than Moonshot), you must also implement `order_stubs_to_orders`. Order generation for trading
follows the path shown below:
`prices_to_signals` -> `signals_to_target_weights` -> `order_stubs_to_orders`
Parameters
----------
CODE : str, required
the strategy code
DB : str, required
code of db to pull data from
DB_FIELDS : list of str, optional
fields to retrieve from db (defaults to ["Open", "Close", "Volume"])
DB_TIMES : list of str (HH:MM:SS), optional
for intraday databases, only retrieve these times
DB_DATA_FREQUENCY : str, optional
Only applicable when DB specifies a Zipline bundle. Whether to query minute or
daily data. If omitted, defaults to minute data for minute bundles and to daily
data for daily bundles. This parameter only needs to be set to request daily data
from a minute bundle. Possible choices: daily, minute (or aliases d, m).
SIDS : list of str, optional
limit db query to these sids
UNIVERSES : list of str, optional
limit db query to these universes
EXCLUDE_SIDS : list of str, optional
exclude these sids from db query
EXCLUDE_UNIVERSES : list of str, optional
exclude these universes from db query
CONT_FUT : str, optional
pass this cont_fut option to db query (default None)
LOOKBACK_WINDOW : int, optional
get this many days additional data prior to the backtest start date or
trade date to account for rolling windows. If set to None (the default),
will use the largest value of any attributes ending with `*_WINDOW`, or
252 if no such attributes, and will further pad window based on any
`*_INTERVAL` attributes, which are interpreted as pandas offset aliases
(for example `REBALANCE_INTERVAL = 'Q'`). Set to 0 to disable.
NLV : dict, optional
dict of currency:NLV for each currency represented in the strategy. Can
alternatively be passed directly to backtest method.
COMMISSION_CLASS : Class or dict of (sectype,exchange,currency):Class, optional
the commission class to use. If strategy includes a mix of security types,
exchanges, or currencies, you can pass a dict mapping tuples of
(sectype,exchange,currency) to the different commission classes. By default
no commission is applied.
SLIPPAGE_CLASSES : iterable of slippage classes, optional
one or more slippage classes. By default no slippage is applied.
SLIPPAGE_BPS : float, optional
        amount of one-way slippage to apply to each trade in BPS (for example, enter 5 to deduct
5 BPS)
BENCHMARK : str, optional
the sid of a security in the historical data to use as the benchmark
BENCHMARK_DB : str, optional
the database containing the benchmark, if different from DB. BENCHMARK_DB
should contain end-of-day data, not intraday (but can be used with intraday
backtests).
BENCHMARK_TIME : str (HH:MM:SS), optional
use prices from this time of day as benchmark prices. Only applicable if
benchmark prices originate in DB (not BENCHMARK_DB), DB contains intraday
data, and backtest results are daily.
TIMEZONE : str, optional
convert timestamps to this timezone (if not provided, will be inferred
from securities universe if possible)
CALENDAR : str, optional
use this exchange's trading calendar to determine which date's signals
should be used for live trading. If the exchange is currently open,
today's signals will be used. If currently closed, the signals corresponding
to the last date the exchange was open will be used. If no calendar is specified,
today's signals will be used.
POSITIONS_CLOSED_DAILY : bool
if True, positions in backtests that fall on adjacent days are assumed to
be closed out and reopened each day rather than held continuously; this
impacts commission and slippage calculations (default is False, meaning
adjacent positions are assumed to be held continuously)
ALLOW_REBALANCE : bool or float
in live trading, whether to allow rebalancing of existing positions that
are already on the correct side. If True (the default), allow rebalancing.
If False, no rebalancing. If set to a positive decimal, allow rebalancing
only when the existing position differs from the target position by at least
this percentage. For example 0.5 means don't rebalance a position unless
the position will change by +/-50%.
CONTRACT_VALUE_REFERENCE_FIELD : str, optional
the price field to use for determining contract values for the purpose of
applying commissions and constraining weights in backtests and calculating
order quantities in trading. Defaults to the first available of Close, Open,
MinuteCloseClose, SecondCloseClose, LastPriceClose, BidPriceClose, AskPriceClose,
TimeSalesLastPriceClose, TimeSalesFilteredLastPriceClose, LastPriceMean,
BidPriceMean, AskPriceMean, TimeSalesLastPriceMean, TimeSalesFilteredLastPriceMean,
MinuteOpenOpen, SecondOpenOpen, LastPriceOpen, BidPriceOpen, AskPriceOpen,
TimeSalesLastPriceOpen, TimeSalesFilteredLastPriceOpen.
ACCOUNT_BALANCE_FIELD : str or list of str, optional
the account field to use for calculating order quantities as a percentage of
account equity. Applies to trading only, not backtesting. Default is
NetLiquidation. If a list of fields is provided, the minimum value is used.
For example, ['NetLiquidation', 'PreviousEquity'] means to use the lesser of
NetLiquidation or PreviousEquity to determine order quantities.
Examples
--------
Example of a minimal strategy that runs on a history db called "mexi-stk-1d" and buys when
the securities are above their 200-day moving average:
>>> MexicoMovingAverage(Moonshot):
>>>
>>> CODE = "mexi-ma"
>>> DB = "mexi-stk-1d"
>>> MAVG_WINDOW = 200
>>>
>>> def prices_to_signals(self, prices):
>>> closes = prices.loc["Close"]
>>> mavgs = closes.rolling(self.MAVG_WINDOW).mean()
>>> signals = closes > mavgs.shift()
>>> return signals.astype(int)
"""
CODE = None
DB = None
DB_FIELDS = ["Open", "Close", "Volume"]
DB_TIMES = None
DB_DATA_FREQUENCY = None
SIDS = None
UNIVERSES = None
EXCLUDE_SIDS = None
EXCLUDE_UNIVERSES = None
CONT_FUT = None
LOOKBACK_WINDOW = None
NLV = None
COMMISSION_CLASS = None
SLIPPAGE_CLASSES = ()
SLIPPAGE_BPS = 0
BENCHMARK = None
BENCHMARK_DB = None
BENCHMARK_TIME = None
TIMEZONE = None
CALENDAR = None
POSITIONS_CLOSED_DAILY = False
ALLOW_REBALANCE = True
CONTRACT_VALUE_REFERENCE_FIELD = None
ACCOUNT_BALANCE_FIELD = None
def __init__(self):
self.is_trade = False
self.review_date = None # see trade() docstring
self.is_backtest = False
self._securities_master = None
self._backtest_results = {}
self._inferred_timezone = None
self._signal_date = None # set by _weights_to_today_weights
self._signal_time = None # set by _weights_to_today_weights
def prices_to_signals(self, prices):
"""
From a DataFrame of prices, return a DataFrame of signals. By convention,
signals should be 1=long, 0=cash, -1=short.
Must be implemented by strategy subclasses.
Parameters
----------
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
signals
Examples
--------
Buy when the close is above yesterday's 50-day moving average:
>>> def prices_to_signals(self, prices):
>>> closes = prices.loc["Close"]
>>> mavgs = closes.rolling(50).mean()
>>> signals = closes > mavgs.shift()
>>> return signals.astype(int)
"""
raise NotImplementedError("strategies must implement prices_to_signals")
def signals_to_target_weights(self, signals, prices):
"""
From a DataFrame of signals, return a DataFrame of target weights.
Whereas signals indicate the direction of the trades, weights
indicate both the direction and size. For example, -0.5 means a short
position equal to 50% of the equity allocated to the strategy.
Weights are used to help create orders in live trading, and to help
simulate executed positions in backtests.
The default implemention of this method evenly divides allocated
capital among the signals each period, but it is intended to be
overridden by strategy subclasses.
A variety of built-in weight allocation algorithms are provided by
and documented under `moonshot.mixins.WeightAllocationMixin`.
Parameters
----------
signals : DataFrame, required
a DataFrame of signals
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame
of price/market data
Returns
-------
DataFrame
weights
Examples
--------
The default implementation is shown below:
>>> def signals_to_target_weights(self, signals, prices):
>>> weights = self.allocate_equal_weights(signals) # provided by moonshot.mixins.WeightAllocationMixin
>>> return weights
"""
weights = self.allocate_equal_weights(signals)
return weights
def target_weights_to_positions(self, weights, prices):
"""
From a DataFrame of target weights, return a DataFrame of simulated
positions.
The positions should shift the weights based on when the weights
would be filled in live trading.
By default, assumes the position are taken in the period after the
weights were allocated. Intended to be overridden by strategy
subclasses.
Parameters
----------
weights : DataFrame, required
a DataFrame of weights
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
positions
Examples
--------
The default implemention is shown below (enter position in the period after
signal generation/weight allocation):
>>> def target_weights_to_positions(self, weights, prices):
>>> positions = weights.shift()
>>> return positions
"""
positions = weights.shift()
return positions
def positions_to_gross_returns(self, positions, prices):
"""
From a DataFrame of positions, return a DataFrame of returns before
commissions and slippage.
By default, assumes entry on the close on the period the position is
taken and calculates the return through the following period's close.
Intended to be overridden by strategy subclasses.
Parameters
----------
positions : DataFrame, required
a DataFrame of positions
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
gross returns
Examples
--------
The default implementation is shown below:
>>> def positions_to_gross_returns(self, positions, prices):
>>> closes = prices.loc["Close"]
>>> gross_returns = closes.pct_change() * positions.shift()
>>> return gross_returns
"""
closes = prices.loc["Close"]
gross_returns = closes.pct_change() * positions.shift()
return gross_returns
def order_stubs_to_orders(self, orders, prices):
"""
From a DataFrame of order stubs, creates a DataFrame of fully
specified orders.
Parameters
----------
orders : DataFrame
a DataFrame of order stubs, with columns Sid, Account, Action,
OrderRef, and TotalQuantity
prices : DataFrame
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
a DataFrame of fully specified orders, with (at minimum) columns
Exchange, Tif, OrderType added
Examples
--------
The orders DataFrame provided to this method resembles the following:
>>> print(orders)
Sid Account Action OrderRef TotalQuantity
0 12345 U12345 SELL my-strategy 100
1 12345 U55555 SELL my-strategy 50
2 23456 U12345 BUY my-strategy 100
3 23456 U55555 BUY my-strategy 50
4 34567 U12345 BUY my-strategy 200
5 34567 U55555 BUY my-strategy 100
The default implemention creates MKT DAY orders and is
shown below:
>>> def order_stubs_to_orders(self, orders, prices):
>>> orders["OrderType"] = "MKT"
>>> orders["Tif"] = "DAY"
>>> return orders
Set a limit price equal to the prior closing price:
>>> closes = prices.loc["Close"]
>>> prior_closes = closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
>>> orders["OrderType"] = "LMT"
>>> orders["LmtPrice"] = prior_closes
"""
orders["OrderType"] = "MKT"
orders["Tif"] = "DAY"
return orders
def reindex_like_orders(self, df, orders):
"""
Reindexes a DataFrame (having Sids as columns and dates as index)
to match the shape of the orders DataFrame.
Parameters
----------
df : DataFrame, required
a DataFrame of arbitrary values with Sids as columns and
dates as index
orders : DataFrame, required
an orders DataFrame with a Sid column
Returns
-------
Series
a Series with an index matching orders
Examples
--------
Calculate prior closes (assuming daily bars) and reindex like
orders:
>>> closes = prices.loc["Close"]
>>> prior_closes = closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
Calculate prior closes (assuming 30-min bars) and reindex like
orders:
>>> session_closes = prices.loc["Close"].xs("15:30:00", level="Time")
>>> prior_closes = session_closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
"""
df = df.loc[self._signal_date]
if "Time" in df.index.names:
if not self._signal_time:
raise MoonshotError(
"cannot reindex DataFrame like orders because DataFrame contains "
"'Time' in index, please take a cross-section first, for example: "
"`my_dataframe.xs('15:45:00', level='Time')`")
df = df.loc[self._signal_time]
df.name = "_MoonshotOther"
df = orders.join(df, on="Sid")._MoonshotOther
df.name = None
return df
def orders_to_child_orders(self, orders):
"""
From a DataFrame of orders, returns a DataFrame of child orders
(bracket orders) to be submitted if the parent orders fill.
An OrderId column will be added to the orders DataFrame, and child
orders will be linked to it via a ParentId column. The Action
(BUY/SELL) will be reversed on the child orders but otherwise the
child orders will be identical to the parent orders.
Parameters
----------
orders : DataFrame, required
an orders DataFrame
Returns
-------
DataFrame
a DataFrame of child orders
Examples
--------
>>> orders.head()
Sid Action TotalQuantity Exchange OrderType Tif
0 12345 BUY 200 SMART MKT Day
1 23456 BUY 400 SMART MKT Day
>>> child_orders = self.orders_to_child_orders(orders)
>>> child_orders.loc[:, "OrderType"] = "MOC"
>>> orders = pd.concat([orders,child_orders])
>>> orders.head()
Sid Action TotalQuantity Exchange OrderType Tif OrderId ParentId
0 12345 BUY 200 SMART MKT Day 0 NaN
1 23456 BUY 400 SMART MKT Day 1 NaN
0 12345 SELL 200 SMART MOC Day NaN 0
1 23456 SELL 400 SMART MOC Day NaN 1
"""
if "OrderId" not in orders.columns:
orders["OrderId"] = orders.index.astype(str) + ".{0}".format(time.time())
child_orders = orders.copy()
child_orders.rename(columns={"OrderId":"ParentId"}, inplace=True)
child_orders.loc[orders.Action=="BUY", "Action"] = "SELL"
child_orders.loc[orders.Action=="SELL", "Action"] = "BUY"
return child_orders
def _quantities_to_order_stubs(self, quantities):
"""
From a DataFrame of quantities to be ordered (with Sids as index,
Accounts as columns), returns a DataFrame of order stubs.
quantities in:
Account U12345 U55555
Sid
12345 -100 -50
23456 100 50
34567 200 100
order_stubs out:
Sid Account Action OrderRef TotalQuantity
0 12345 U12345 SELL my-strategy 100
1 12345 U55555 SELL my-strategy 50
2 23456 U12345 BUY my-strategy 100
3 23456 U55555 BUY my-strategy 50
4 34567 U12345 BUY my-strategy 200
5 34567 U55555 BUY my-strategy 100
"""
quantities.index.name = "Sid"
quantities.columns.name = "Account"
quantities = quantities.stack()
quantities.name = "Quantity"
order_stubs = quantities.to_frame().reset_index()
order_stubs["Action"] = np.where(order_stubs.Quantity > 0, "BUY", "SELL")
order_stubs = order_stubs.loc[order_stubs.Quantity != 0].copy()
order_stubs["OrderRef"] = self.CODE
order_stubs["TotalQuantity"] = order_stubs.Quantity.abs()
order_stubs = order_stubs.drop("Quantity",axis=1)
return order_stubs
def _get_nlv(self):
"""
Return a dict of currency:NLV for each currency in the strategy. By
default simply returns the NLV class attribute.
"""
return self.NLV
def _positions_to_turnover(self, positions):
"""
Given a dataframe of positions, returns a dataframe of turnover. 0
indicates no turnover; 1 indicates going from 100% short to cash or
cash to 100% long (for example), and vice versa; and 2 indicates
going from 100% short to %100 long (for example).
"""
# Intraday trades are opened and closed each day there's a position,
# so the turnover is twice the positions.
if self.POSITIONS_CLOSED_DAILY:
turnover = positions * 2
else:
turnover = positions.fillna(0).diff()
return turnover.abs()
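    # Worked example for the continuous-holding case: positions 0 -> 1 -> -1 -> -1 give
    # diffs NaN, 1, -2, 0, so turnover is NaN, 1, 2, 0 -- flipping from 100% long to 100%
    # short counts as 2x turnover, while holding an unchanged position counts as 0.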
def _weights_to_today_weights(self, weights, prices):
"""
From a DataFrame of target weights, extract the row that contains the
weights that should be used for today's trading. Returns a Series of
weights by sid:
Sid
12345 -0.2
23456 0
34567 0.1
The date whose weights are selected is usually today, but if CALENDAR
is used and the market is closed it will be the date when the market
closed. Can also be overridden by review_date.
For intraday strategies, the time whose weights are selected is the
latest time that is earlier than the time at which the strategy is
running.
"""
# First, get the signal date
# Use review_date if set
if self.review_date:
dt = pd.Timestamp(self.review_date)
# Else use trading calendar if provided
elif self.CALENDAR:
status = list_calendar_statuses([self.CALENDAR])[self.CALENDAR]
# If the exchange if closed, the signals should correspond to the
# date the exchange was last open
if status["status"] == "closed":
dt = pd.Timestamp(status["since"])
# If the exchange is open, the signals should correspond to
# today's date
else:
dt = pd.Timestamp.now(tz=status["timezone"])
# If no trading calendar, use today's date (in strategy timezone)
else:
tz = self.TIMEZONE or self._inferred_timezone
dt = pd.Timestamp.now(tz=tz)
# Keep only the date as the signal_date
self._signal_date = pd.Timestamp(dt.date())
# extract the current time (or review date time)
trade_time = dt.strftime("%H:%M:%S")
weights_is_intraday = "Time" in weights.index.names
try:
today_weights = weights.loc[self._signal_date]
except KeyError:
if weights_is_intraday:
max_date = weights.index.get_level_values("Date").max()
else:
max_date = weights.index.max()
msg = ("expected signal date {0} not found in target weights DataFrame, "
"is the underlying data up-to-date? (max date is {1})")
if not self.CALENDAR and not weights_is_intraday and self._signal_date.date() - max_date.date() == pd.Timedelta(days=1):
msg += (" If your strategy trades before the open and {0} data "
"is not expected, try setting CALENDAR = <exchange>")
raise MoonshotError(msg.format(
self._signal_date.date().isoformat(),
max_date.date().isoformat()))
if not weights_is_intraday:
print("using target weights for {0} to create orders".format(self._signal_date.date().isoformat()))
return today_weights
# For intraday strategies, select the weights from the latest time
# that is earlier than the trade time. Note that we select the
# expected time from the entire weights DataFrame, which will result
# in a failure if that time is missing for the trade date
unique_times = weights.index.get_level_values("Time").unique()
self._signal_time = unique_times[unique_times < trade_time].max()
if pd.isnull(self._signal_time):
msg = (
"cannot determine which target weights to use for orders because "
"target weights DataFrame contains no times earlier than trade time {0} "
"for signal date {1}".format(
trade_time,
self._signal_date.date().isoformat()))
if self.review_date:
msg += ", please adjust the review_date"
raise MoonshotError(msg)
# get_prices inserts all times into each day's index, thus
# the signal_time will be in the weights DataFrame even if the data
# is stale. Instead, to validate the data, we make sure that there is
# at least one nonnull field in the prices DataFrame at the
# signal_time on the signal_date
today_prices = prices.xs(self._signal_date, level="Date")
notnull_today_prices = today_prices[today_prices.notnull().any(axis=1)]
try:
no_signal_time_prices = notnull_today_prices.xs(self._signal_time, level="Time").empty
except KeyError:
no_signal_time_prices = True
if no_signal_time_prices:
msg = ("no {0} data found in prices DataFrame for signal date {1}, "
"is the underlying data up-to-date? (max time for {1} "
"is {2})")
notnull_max_date = notnull_today_prices.iloc[-1].name[-1]
raise MoonshotError(msg.format(
self._signal_time,
self._signal_date.date().isoformat(),
notnull_max_date))
today_weights = today_weights.loc[self._signal_time]
print("using target weights for {0} at {1} to create orders".format(
self._signal_date.date().isoformat(),
self._signal_time))
return today_weights
def _get_commissions(self, positions, prices):
"""
Returns the commissions to be subtracted from the returns.
"""
if not self.COMMISSION_CLASS:
return pd.DataFrame(0, index=positions.index, columns=positions.columns)
turnover = self._positions_to_turnover(positions)
contract_values = self._get_contract_values(prices)
prices_is_intraday = "Time" in prices.index.names
positions_is_intraday = "Time" in positions.index.names
if prices_is_intraday and not positions_is_intraday:
contract_values = contract_values.groupby(
contract_values.index.get_level_values("Date")).first()
fields = prices.index.get_level_values("Field").unique()
if "Nlv" in self._securities_master.columns:
nlvs = contract_values.apply(lambda x: self._securities_master.Nlv, axis=1)
else:
nlvs = None
# handle the case of only one commission class
if not isinstance(self.COMMISSION_CLASS, dict):
commissions = self.COMMISSION_CLASS.get_commissions(contract_values, turnover=turnover, nlvs=nlvs)
return commissions
# handle multiple commission classes per sectype/exchange/currency
# first, tuple-ize the dict keys in case they are lists
commission_classes = {}
for sec_group, commission_cls in self.COMMISSION_CLASS.items():
commission_classes[tuple(sec_group)] = commission_cls
defined_sec_groups = set([tuple(k) for k in commission_classes.keys()])
# Reindex master fields like contract_values
sec_types = contract_values.apply(lambda x: self._securities_master.SecType, axis=1)
exchanges = contract_values.apply(lambda x: self._securities_master.Exchange, axis=1)
currencies = contract_values.apply(lambda x: self._securities_master.Currency, axis=1)
required_sec_groups = set([
tuple(s.split("|")) for s in (sec_types+"|"+exchanges+"|"+currencies).iloc[-1].unique()])
missing_sec_groups = required_sec_groups - defined_sec_groups
if missing_sec_groups:
raise MoonshotParameterError("expected a commission class for each combination of (sectype,exchange,currency) "
"but none is defined for {0}".format(
", ".join(["({0})".format(",".join(t)) for t in missing_sec_groups])))
all_commissions = | pd.DataFrame(None, index=positions.index, columns=positions.columns) | pandas.DataFrame |
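# The following standalone sketch (not part of Moonshot) illustrates the
# intraday weight-selection rule used above: from a (Date, Time) MultiIndex of
# target weights, pick the latest time strictly earlier than the trade time.
# All names below (sample_weights, trade_time) are illustrative assumptions.
import pandas as pd

sample_weights = pd.DataFrame(
    {"FI12345": [0.5, 0.25, 0.0]},
    index=pd.MultiIndex.from_product(
        [[pd.Timestamp("2024-01-02")], ["09:30:00", "10:00:00", "15:45:00"]],
        names=["Date", "Time"]))
trade_time = "10:05:00"
unique_times = sample_weights.index.get_level_values("Time").unique()
signal_time = unique_times[unique_times < trade_time].max()  # -> "10:00:00"
print(sample_weights.xs(signal_time, level="Time"))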
from bs4 import BeautifulSoup
import requests
import numpy as np
import re
import pprint
import pandas as pd
headers = {
'user-agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/79.0.3945.117 '
'Safari/537.36'
}
session = requests.session()
session.headers.update(headers)
# Fetch grades
def get_grades(semesterIds='62,81,101,121'):
response = session.get("https://jw.ustc.edu.cn/for-std/grade/sheet/getGradeList?trainTypeId=1",
params={'semesterIds': semesterIds})
soup = BeautifulSoup(response.content, 'lxml')
content = soup.p.contents[0]
content = re.sub('true', 'True', content)
content = re.sub('null', 'None', content)
# First extract each semester's scores
scores_semesters = re.findall(r'"scores":\[.*?\]', content)
# Then pull the individual grade records out of each semester
scores = []
for i in range(len(scores_semesters)):
scores += (re.findall(r'\{.*?\}', scores_semesters[i]))
pop_list = ['id', 'courseNameCh', 'semesterEn', 'score', 'credits', 'gp']
for i in range(len(scores)):
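# Each entry is a dict-like string; the true/null -> True/None substitutions above let it be evaluated as a Python literal via exec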
exec('scores[i] = ' + scores[i])
keys = list(scores[i].keys())
for key in keys:
if key not in pop_list:
scores[i].pop(key)
# Convert to a DataFrame
pd.set_option('display.unicode.ambiguous_as_wide', True)
| pd.set_option('display.unicode.east_asian_width', True) | pandas.set_option |
from __future__ import annotations
from collections import namedtuple
from typing import TYPE_CHECKING
import warnings
from matplotlib.artist import setp
import numpy as np
from pandas.core.dtypes.common import is_dict_like
from pandas.core.dtypes.missing import remove_na_arraylike
import pandas as pd
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.core import (
LinePlot,
MPLPlot,
)
from pandas.plotting._matplotlib.style import get_standard_colors
from pandas.plotting._matplotlib.tools import (
create_subplots,
flatten_axes,
maybe_adjust_figure,
)
if TYPE_CHECKING:
from matplotlib.axes import Axes
class BoxPlot(LinePlot):
_kind = "box"
_layout_type = "horizontal"
_valid_return_types = (None, "axes", "dict", "both")
# namedtuple to hold results
BP = namedtuple("BP", ["ax", "lines"])
def __init__(self, data, return_type="axes", **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots show the last
# column label
if self.orientation == "vertical":
self.sharex = False
else:
self.sharey = False
@classmethod
def _plot(cls, ax, y, column_num=None, return_type="axes", **kwds):
if y.ndim == 2:
y = [remove_na_arraylike(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na_arraylike(y)
bp = ax.boxplot(y, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return cls.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _validate_color_args(self):
if "color" in self.kwds:
if self.colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'"
)
self.color = self.kwds.pop("color")
if isinstance(self.color, dict):
valid_keys = ["boxes", "whiskers", "medians", "caps"]
for key in self.color:
if key not in valid_keys:
raise ValueError(
f"color dict contains invalid key '{key}'. "
f"The key must be either {valid_keys}"
)
else:
self.color = None
# get standard colors for default
colors = | get_standard_colors(num_colors=3, colormap=self.colormap, color=None) | pandas.plotting._matplotlib.style.get_standard_colors |
import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal
import numpy as np
from easyframes.easyframes import hhkit
# from easyframes.easyframes import hhkit
class TestEgen(unittest.TestCase):
def setUp(self):
"""
df_original = pd.read_csv('sample_hh_dataset.csv')
df = df_original.copy()
print(df.to_dict())
"""
self.df = pd.DataFrame(
{'educ': {0: 'secondary', 1: 'bachelor', 2: 'primary', 3: 'higher', 4: 'bachelor', 5: 'secondary',
6: 'higher', 7: 'higher', 8: 'primary', 9: 'primary'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 3, 6: 4, 7: 4, 8: 4, 9: 4},
'has_car': {0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1, 9: 1},
'weighthh': {0: 2, 1: 2, 2: 2, 3: 3, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3, 9: 3},
'house_rooms': {0: 3, 1: 3, 2: 3, 3: 2, 4: 1, 5: 1, 6: 3, 7: 3, 8: 3, 9: 3},
'prov': {0: 'BC', 1: 'BC', 2: 'BC', 3: 'Alberta', 4: 'BC', 5: 'BC', 6: 'Alberta',
7: 'Alberta', 8: 'Alberta', 9: 'Alberta'},
'id': {0: 1, 1: 2, 2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 3, 9: 4},
'age': {0: 44, 1: 43, 2: 13, 3: 70, 4: 23, 5: 20, 6: 37, 7: 35, 8: 8, 9: 15},
'fridge': {0: 'yes', 1: 'yes', 2: 'yes', 3: 'no', 4: 'yes', 5: 'yes', 6: 'no',
7: 'no', 8: 'no', 9: 'no'},
'male': {0: 1, 1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 0, 9: 0}})
self.my_include = np.array([False, False, False, True, True, False, True, True, True, False])
self.my_include_using_integer = np.array([0, 0, 0, 1, 5, 0, -10, -30, -1, 0])
self.my_include_using_float = np.array([0, 0, 0, 1, 10.3, 0, -10, -30, -1, 0])
self.my_include_using_nonnumeric = np.array(['0', 0, 0, 1, 10.3, 0, -10, -30, -1, 0])
def test_reject_both_exclude_and_include(self):
myhhkit = hhkit()
try:
df2 = myhhkit.egen(operation='count', groupby='hh', col='hh',
exclude=self.my_include, include=self.my_include)
except:
return True
raise Exception("Both include and exclude were allowed")
def test_no_include_no_exclude_includes_all_rows(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='count', groupby='hh', column='hh')
correct_values = pd.Series([3, 3, 3, 1, 2, 2, 4, 4, 4, 4])
assert_series_equal(correct_values, myhhkit.df['(count) hh by hh'])
def test_specify_include_yields_correct_results_count(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='count', groupby='hh', column='hh', include=self.my_include)
correct_values = pd.Series([np.nan, np.nan, np.nan, 1, 1, 1, 3, 3, 3, 3])
assert_series_equal(correct_values, myhhkit.df['(count) hh by hh'])
def test_specify_include_yields_correct_results_mean(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='mean', groupby='hh', column='age', include=self.my_include)
correct_values = pd.Series([np.nan, np.nan, np.nan, 70, 23, 23, 26.666666, 26.666666,
26.666666, 26.666666])
assert_series_equal(correct_values, myhhkit.df['(mean) age by hh'])
def test_specify_exclude_yields_correct_results_count(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='count', groupby='hh', column='hh', exclude=self.my_include)
correct_values = pd.Series([3, 3, 3, np.nan, 1, 1, 1, 1, 1, 1])
assert_series_equal(correct_values, myhhkit.df['(count) hh by hh'])
def test_specify_exclude_yields_correct_results_mean(self):
myhhkit = hhkit(self.df)
# myhhkit.from_dict(self.df)
myhhkit.egen(operation='mean', groupby='hh', column='age', exclude=self.my_include)
correct_values = pd.Series([33.333333, 33.333333, 33.333333, np.nan, 20, 20, 15, 15, 15, 15])
| assert_series_equal(correct_values, myhhkit.df['(mean) age by hh']) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 18:41:32 2017
@author: mvanoudh
"""
import numpy as np
import pandas as pd
import sklearn
import sklearn.cross_validation  # needed below for indexable/check_cv/check_scoring/_fit_and_score
from sklearn.base import clone
def cross_val_score2(estimator, X, y=None, scoring=None,
parameters=None, cv=None, verbose=0, fit_params=None,
early_stop=False, stopping_threshold=None,
return_last_estimator=False,
return_predict = False,
return_predict_proba = False
):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
parameters : dict or None
Parameters to be set on the estimator.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
early_stop : boolean
if True, the cross-validation stops early if the results are bad.
Stops if: NaN in the score, or, after at least 3 iterations, if the best score is below the 'stopping_threshold'
stopping_threshold : float
cross-validation stops if the best score is below this value (after 3 iterations) when early_stop = True
return_last_estimator : boolean (default = False)
if True, the last estimator is also returned (for testing)
return_predict : boolean (default = False)
if True, the cross-validated predictions are returned as well
return_predict_proba : boolean (default = False)
if True, the cross-validated predicted probabilities are returned as well
Returns
-------
scores : pd.DataFrame, shape = (len(cv), 4).
The columns are :
train : score on training data (in-sample score)
test : score on testing data (out-of-sample score)
test_size : the number of observation in the testing set
time : the time it took to fit
if return_last_estimator is True, the scores and the last estimator are returned
"""
#Rmk : the function is mostly a copy of sklearn cross_val_score with a few additions
if early_stop and stopping_threshold is None:
raise ValueError("I need a stopping_threshold when 'early_stop' is True")
X, y = sklearn.cross_validation.indexable(X, y)
cv = sklearn.cross_validation.check_cv(cv, X, y, classifier=sklearn.cross_validation.is_classifier(estimator))
scorer = sklearn.cross_validation.check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
all_scores = list()
i = 1
if return_predict:
y_hat = np.empty(y.shape,dtype=y.dtype)
if return_predict_proba:
y_hat_proba = []
for train, test in cv:
if verbose > 0:
print("cv %d started\n" % i )
new_estimator = clone(estimator)
temp_res = sklearn.cross_validation._fit_and_score(new_estimator,X,y, scorer, train, test, verbose, parameters, fit_params,return_train_score=True)
score_train, score_test, nb_of_observations, time_of_fit = temp_res
### New ###
# TODO: handle either predict OR predict_proba properly
if return_predict:
y_hat[test] = new_estimator.predict(X[test,:])
if return_predict_proba:
df_proba = pd.DataFrame(new_estimator.predict_proba(X[test,:]),columns = list(new_estimator.classes_) , index = test)
y_hat_proba.append(df_proba)
# pr_proba = new_estimator.predict_proba(X[test,:])
# if y_hat_proba is None:
# classes = new_estimator.classes_
# y_hat_proba = np.empty((len(y),len(classes)))
#
# assert np.all(new_estimator.classes_ == classes)
# y_hat_proba[test,:] = pr_proba
if verbose >0:
print("cv %d done!\n\n" % i)
if verbose > 1:
print("score train : %2.2f%% , score test : %2.2f%%" % (100*score_train,100*score_test))
all_scores.append((score_train, score_test,
nb_of_observations, time_of_fit))
if early_stop:
if | pd.isnull(score_test) | pandas.isnull |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader = StataReader(path)
parsed_time_stamp = dt.datetime.strptime(reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path), InvalidColumnName)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(KeyError,
lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path),
PossiblePrecisionLoss)
# should produce a single warning
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
tm.assert_produces_warning(original.to_stata(path, {0: 'tc'}),
InvalidColumnName)
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
sr_115 = StataReader(self.dta16_115).variable_labels()
sr_117 = StataReader(self.dta16_117).variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k,v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
sr = StataReader(path)
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len, 'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b','h','l')
df = DataFrame([[0.0]],columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
valid_range = StataReader(path).VALID_RANGE
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
for t in types:
offset = valid_range[t][1]
for i in range(0,27):
val = StataMissingValue(offset+1+i)
self.assertTrue(val.string == expected_values[i])
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f',b'\x00\x00\x00\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<f',b'\x00\xd0\x00\x7f')[0])
self.assertTrue(val.string == '.z')
# Test extremes for floats
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<d',b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i+(j*27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data,columns=columns)
parsed_113 = read_stata(self.dta17_113, convert_missing=True)
parsed_115 = read_stata(self.dta17_115, convert_missing=True)
parsed_117 = read_stata(self.dta17_117, convert_missing=True)
tm.assert_frame_equal(expected, parsed_113)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(
datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
# Fixes for weekly, quarterly,half,year
expected[2][2] = datetime(9999,12,24)
expected[2][3] = datetime(9999,12,1)
expected[2][4] = datetime(9999,10,1)
expected[2][5] = datetime(9999,7,1)
expected[4][2] = datetime(2262,4,16)
expected[4][3] = expected[4][4] = datetime(2262,4,1)
expected[4][5] = expected[4][6] = datetime(2262,1,1)
expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677,10,1)
expected[5][5] = expected[5][6] = datetime(1678,1,1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
date_conversion = dict((c, c[-2:]) for c in columns)
#{c : c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = 'index'
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
no_conversion = read_stata(self.dta15_117,
convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(self.dta15_117,
convert_dates=True,
preserve_dtypes=False)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
columns = ['byte_', 'int_', 'long_']
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, dropped)
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
with tm.assertRaises(ValueError):
columns = ['byte_', 'int_', 'long_', 'not_found']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled', 'unlabeled'])
expected = original.copy()
# these are all categoricals
original = pd.concat([original[col].astype('category') for col in original], axis=1)
expected['incompletely_labeled'] = expected['incompletely_labeled'].apply(str)
expected['unlabeled'] = expected['unlabeled'].apply(str)
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
expected.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
# Silence warnings
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), expected)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[['a' * 10000],
['b' * 10000],
['c' * 10000],
['d' * 10000]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with tm.ensure_clean() as path:
tm.assertRaises(ValueError, original.to_stata, path)
original = pd.DataFrame.from_records(
[['a'],
['b'],
['c'],
['d'],
[1]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
with warnings.catch_warnings(record=True) as w:
original.to_stata(path)
tm.assert_equal(len(w), 1) # should get a warning for mixed content
def test_categorical_with_stata_missing_values(self):
values = [['a' + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=['many_labels'])
original = pd.concat([original[col].astype('category') for col in original], axis=1)
original.index.name = 'index'
with | tm.ensure_clean() | pandas.util.testing.ensure_clean |
'''
example of loading data via the FinMind API
'''
from FinMind.Data import Load
import requests
import pandas as pd
url = 'http://finmindapi.servebeer.com/api/data'
list_url = 'http://finmindapi.servebeer.com/api/datalist'
translate_url = 'http://finmindapi.servebeer.com/api/translation'
'''----------------TaiwanStockInfo----------------'''
form_data = {'dataset':'TaiwanStockInfo'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPrice----------------'''
form_data = {'dataset':'TaiwanStockPrice',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPriceMinute----------------'''
form_data = {'dataset':'TaiwanStockPriceMinute',
'stock_id':'2330',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------FinancialStatements----------------'''
form_data = {'dataset':'FinancialStatements',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data = Load.transpose(data)
data.head()
'''----------------TaiwanCashFlowsStatement----------------'''
form_data = {'dataset':'TaiwanCashFlowsStatement',
'stock_id':'2330',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockStockDividend----------------'''
form_data = {'dataset':'TaiwanStockStockDividend',
'stock_id':'2317',
'date':'2018-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockMarginPurchaseShortSale----------------'''
form_data = {'dataset':'TaiwanStockMarginPurchaseShortSale',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------InstitutionalInvestorsBuySell----------------'''
form_data = {'dataset':'InstitutionalInvestorsBuySell',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------Shareholding----------------'''
form_data = {'dataset':'Shareholding',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------BalanceSheet----------------'''
form_data = {'dataset':'BalanceSheet',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockHoldingSharesPer----------------'''
form_data = {'dataset':'TaiwanStockHoldingSharesPer',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = | pd.DataFrame(temp['data']) | pandas.DataFrame |
#!/usr/bin/env python
'''mmtfStructure.py
Decode msgpack unpacked data to mmtf structure
'''
__author__ = "<NAME>"
__maintainer__ = "<NAME>) Huang"
__email__ = "<EMAIL>"
__version__ = "0.2.0"
__status__ = "Done"
import numpy as np
import pandas as pd
from mmtfPyspark.utils import mmtfDecoder, MmtfChain, MmtfModel, Codec
class MmtfStructure(object):
def __init__(self, input_data, first_model=False):
"""Decodes a msgpack unpacked data to mmtf structure"""
self.input_data = input_data
self.mmtf_version = mmtfDecoder.get_value(input_data, 'mmtfVersion', required=True)
self.mmtf_producer = mmtfDecoder.get_value(input_data, 'mmtfProducer', required=True)
self.unit_cell = mmtfDecoder.get_value(input_data, 'unitCell')
self.space_group = mmtfDecoder.get_value(input_data, 'spaceGroup')
self.structure_id = mmtfDecoder.get_value(input_data, 'structureId')
self.title = mmtfDecoder.get_value(input_data, 'title')
self.deposition_date = mmtfDecoder.get_value(input_data, 'depositionDate')
self.release_date = mmtfDecoder.get_value(input_data, 'releaseDate')
self.ncs_operator_list = mmtfDecoder.get_value(input_data, 'ncsOperatorList')
self.bio_assembly = mmtfDecoder.get_value(input_data, 'bioAssemblyList') # TODO naming inconsistency
self.entity_list = mmtfDecoder.get_value(input_data, 'entityList')
self.experimental_methods = mmtfDecoder.get_value(input_data, 'experimentalMethods')
self.resolution = mmtfDecoder.get_value(input_data, 'resolution')
self.r_free = mmtfDecoder.get_value(input_data, 'rFree')
self.r_work = mmtfDecoder.get_value(input_data, 'rWork')
self.num_bonds = mmtfDecoder.get_value(input_data, 'numBonds', required=True)
self.num_atoms = mmtfDecoder.get_value(input_data, 'numAtoms', required=True)
self.num_groups = mmtfDecoder.get_value(input_data, 'numGroups', required=True)
self.num_chains = mmtfDecoder.get_value(input_data, 'numChains', required=True)
self._num_models = mmtfDecoder.get_value(input_data, 'numModels', required=True)
self.group_list = mmtfDecoder.get_value(input_data, 'groupList', required=True)
self._bond_atom_list = None
self._bond_order_list = None
self._bondResonanceList = None # TODO
self._x_coord_list = None
self._y_coord_list = None
self._z_coord_list = None
self._b_factor_list = None
self._atom_id_list = None
self._alt_loc_list = None
self._occupancy_list = None
self._sec_struct_list = None
self._group_id_list = None
self._group_type_list = None
self._ins_code_list = None
self._sequence_index_list = None
self._chain_id_list = None
self._chain_name_list = None
self.groups_per_chain = mmtfDecoder.get_value(input_data, 'groupsPerChain', required=True)
self.chains_per_model = mmtfDecoder.get_value(input_data, 'chainsPerModel', required=True)
# calculated atom level data
self._chain_names = None
self._chain_ids = None
self._group_numbers = None
self._group_names = None
self._atom_names = None
self._elements = None
self._chem_comp_types = None
self._polymer = None
self._entity_type = None
self._entity_indices = None
self._sequence_positions = None
# calculated indices
self.groupToAtomIndices = None
self.chainToAtomIndices = None
self.chainToGroupIndices = None
self.modelToAtomIndices = None
self.modelToGroupIndices = None
self.modelToChainIndices = None
self._group_serial = None
self._chain_serial = None
self._chain_entity_index = None
self.chainIdToEntityIndices = None
# precalculate indices
# TODO
self.truncated = False
if first_model and self._num_models != 1:
self.num_models = 1
self.truncated = True
else:
self.num_models = self._num_models
self.decoder = Codec()
self.calc_indices()
self.entityChainIndex = None
self.chain_to_entity_index()
# dataframes
self.df = None
@property
def bond_atom_list(self):
if self._bond_atom_list is not None:
return self._bond_atom_list
elif 'bondAtomList' in self.input_data:
self._bond_atom_list = self.decoder.decode_array(self.input_data['bondAtomList'])
# TODO
# if self.truncated: # truncate bond list ...
return self._bond_atom_list
else:
return None
@property
def bond_order_list(self):
if self._bond_order_list is not None:
return self._bond_order_list
elif 'bondOrderList' in self.input_data:
self._bond_order_list = self.decoder.decode_array(self.input_data['bondOrderList'])
# TODO
# if self.truncated: # truncate bond list ...
return self._bond_order_list
else:
return None
@property
def x_coord_list(self):
if self._x_coord_list is not None:
return self._x_coord_list
elif 'xCoordList' in self.input_data:
self._x_coord_list = self.decoder.decode_array(self.input_data['xCoordList'])
if self.truncated:
return self._x_coord_list[:self.num_atoms]
else:
return self._x_coord_list
else:
return None
@property
def y_coord_list(self):
if self._y_coord_list is not None:
return self._y_coord_list
elif 'yCoordList' in self.input_data:
self._y_coord_list = self.decoder.decode_array(self.input_data['yCoordList'])
if self.truncated:
return self._y_coord_list[:self.num_atoms]
else:
return self._y_coord_list
else:
return None
@property
def z_coord_list(self):
if self._z_coord_list is not None:
return self._z_coord_list
elif 'zCoordList' in self.input_data:
self._z_coord_list = self.decoder.decode_array(self.input_data['zCoordList'])
if self.truncated:
return self._z_coord_list[:self.num_atoms]
else:
return self._z_coord_list
else:
return None
@property
def b_factor_list(self):
if self._b_factor_list is not None:
return self._b_factor_list
elif 'bFactorList' in self.input_data:
self._b_factor_list = self.decoder.decode_array(self.input_data['bFactorList'])
if self.truncated:
return self._b_factor_list[:self.num_atoms]
else:
return self._b_factor_list
else:
return None
@property
def occupancy_list(self):
if self._occupancy_list is not None:
return self._occupancy_list
elif 'occupancyList' in self.input_data:
self._occupancy_list = self.decoder.decode_array(self.input_data['occupancyList'])
if self.truncated:
return self._occupancy_list[:self.num_atoms]
else:
return self._occupancy_list
else:
return None
@property
def atom_id_list(self):
if self._atom_id_list is not None:
return self._atom_id_list
elif 'atomIdList' in self.input_data:
self._atom_id_list = self.decoder.decode_array(self.input_data['atomIdList'])
if self.truncated:
return self._atom_id_list[:self.num_atoms]
else:
return self._atom_id_list
else:
return None
@property
def alt_loc_list(self):
if self._alt_loc_list is not None:
return self._alt_loc_list
elif 'altLocList' in self.input_data:
self._alt_loc_list = self.decoder.decode_array(self.input_data['altLocList'])
if self.truncated:
return self._alt_loc_list[:self.num_atoms]
else:
return self._alt_loc_list
else:
return None
@property
def group_id_list(self):
if self._group_id_list is not None:
return self._group_id_list
elif 'groupIdList' in self.input_data:
self._group_id_list = self.decoder.decode_array(self.input_data['groupIdList'])
if self.truncated:
return self._group_id_list[:self.num_groups]
else:
return self._group_id_list
else:
return None
@property
def group_type_list(self):
if self._group_type_list is not None:
return self._group_type_list
elif 'groupTypeList' in self.input_data:
self._group_type_list = self.decoder.decode_array(self.input_data['groupTypeList'])
if self.truncated:
return self._group_type_list[:self.num_groups]
else:
return self._group_type_list
else:
return None
@property
def sec_struct_list(self):
if self._sec_struct_list is not None:
return self._sec_struct_list
elif 'secStructList' in self.input_data:
self._sec_struct_list = self.decoder.decode_array(self.input_data['secStructList'])
if self.truncated:
return self._sec_struct_list[:self.num_groups]
else:
return self._sec_struct_list
else:
return None
@property
def ins_code_list(self):
if self._ins_code_list is not None:
return self._ins_code_list
elif 'insCodeList' in self.input_data:
self._ins_code_list = self.decoder.decode_array(self.input_data['insCodeList'])
if self.truncated:
return self._ins_code_list[:self.num_groups]
else:
return self._ins_code_list
else:
return None
@property
def sequence_index_list(self):
if self._sequence_index_list is not None:
return self._sequence_index_list
elif 'sequenceIndexList' in self.input_data:
self._sequence_index_list = self.decoder.decode_array(self.input_data['sequenceIndexList'])
if self.truncated:
return self._sequence_index_list[:self.num_groups]
else:
return self._sequence_index_list
else:
return None
@property
def chain_id_list(self):
if self._chain_id_list is not None:
return self._chain_id_list
elif 'chainIdList' in self.input_data:
self._chain_id_list = self.decoder.decode_array(self.input_data['chainIdList'])
if self.truncated:
return self._chain_id_list[:self.num_chains]
else:
return self._chain_id_list
else:
return None
@property
def chain_name_list(self):
if self._chain_name_list is not None:
return self._chain_name_list
elif 'chainNameList' in self.input_data:
self._chain_name_list = self.decoder.decode_array(self.input_data['chainNameList'])
if self.truncated:
return self._chain_name_list[:self.num_chains]
else:
return self._chain_name_list
else:
return None
# calculated atom level data
@property
def chain_names(self):
if self._chain_names is None:
self._chain_names = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_chains):
start = self.chainToAtomIndices[i]
end = self.chainToAtomIndices[i + 1]
self._chain_names[start:end] = self.chain_name_list[i]
return self._chain_names
@property
def chain_ids(self):
if self._chain_ids is None:
self._chain_ids = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_chains):
start = self.chainToAtomIndices[i]
end = self.chainToAtomIndices[i + 1]
self._chain_ids[start:end] = self.chain_id_list[i]
return self._chain_ids
@property
def group_numbers(self):
if self._group_numbers is None:
self._group_numbers = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_groups):
start = self.groupToAtomIndices[i]
end = self.groupToAtomIndices[i + 1]
self._group_numbers[start:end] = f'{self.group_id_list[i]}{self.ins_code_list[i]}'
return self._group_numbers
@property
def group_names(self):
if self._group_names is None:
self._group_names = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_groups):
start = self.groupToAtomIndices[i]
end = self.groupToAtomIndices[i + 1]
index = self.group_type_list[i]
self._group_names[start:end] = self.group_list[index]['groupName']
return self._group_names
@property
def atom_names(self):
if self._atom_names is None:
self._atom_names = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_groups):
start = self.groupToAtomIndices[i]
end = self.groupToAtomIndices[i + 1]
index = self.group_type_list[i]
self._atom_names[start:end] = self.group_list[index]['atomNameList']
return self._atom_names
@property
def elements(self):
if self._elements is None:
self._elements = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_groups):
start = self.groupToAtomIndices[i]
end = self.groupToAtomIndices[i + 1]
index = self.group_type_list[i]
self._elements[start:end] = self.group_list[index]['elementList']
return self._elements
@property
def chem_comp_types(self):
if self._chem_comp_types is None:
self._chem_comp_types = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_groups):
start = self.groupToAtomIndices[i]
end = self.groupToAtomIndices[i + 1]
index = self.group_type_list[i]
self._chem_comp_types[start:end] = self.group_list[index]['chemCompType']
return self._chem_comp_types
@property
def group_serial(self):
if self._group_serial is None:
self._group_serial = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_groups):
start = self.groupToAtomIndices[i]
end = self.groupToAtomIndices[i + 1]
self._group_serial[start:end] = i
return self._group_serial
@property
def polymer(self):
if self._polymer is None:
            self._polymer = np.empty(self.num_atoms, dtype=bool)
for i in range(self.num_chains):
start = self.chainToAtomIndices[i]
end = self.chainToAtomIndices[i + 1]
index = self.entityChainIndex[i]
self._polymer[start:end] = self.entity_list[index]['type'] == 'polymer'
return self._polymer
@property
def entity_types(self):
if self._entity_type is None:
self._entity_type = np.empty(self.num_atoms, dtype=np.object_)
for i in range(self.num_chains):
start = self.chainToAtomIndices[i]
end = self.chainToAtomIndices[i + 1]
index = self.entityChainIndex[i]
self._entity_type[start:end] = self.entity_list[index]['type']
return self._entity_type
@property
def entity_indices(self):
if self._entity_indices is None:
self._entity_indices = np.empty(self.num_atoms, dtype=np.int32)
for i in range(self.num_chains):
start = self.chainToAtomIndices[i]
end = self.chainToAtomIndices[i + 1]
self._entity_indices[start:end] = self.entityChainIndex[i]
return self._entity_indices
@property
def chain_serial(self):
if self._chain_serial is None:
self._chain_serial = np.empty(self.num_atoms, dtype=np.int32)
for i in range(self.num_chains):
start = self.chainToAtomIndices[i]
end = self.chainToAtomIndices[i + 1]
self._chain_serial[start:end] = i
return self._chain_serial
@property
def sequence_positions(self):
if self._sequence_positions is None:
self._sequence_positions = np.empty(self.num_atoms, dtype=np.int32)
for i in range(self.num_groups):
start = self.groupToAtomIndices[i]
end = self.groupToAtomIndices[i + 1]
self._sequence_positions[start:end] = self.sequence_index_list[i]
return self._sequence_positions
def to_pandas(self, add_cols=None, multi_index=False):
if self.df is None:
self.calc_core_group_data()
self.df = pd.DataFrame({'chain_name': self.chain_names,
'chain_id': self.chain_ids,
'group_number': self.group_numbers,
'group_name': self.group_names,
'atom_name': self.atom_names,
'altloc': self.alt_loc_list,
'x': self.x_coord_list,
'y': self.y_coord_list,
'z': self.z_coord_list,
'o': self.occupancy_list,
'b': self.b_factor_list,
'element': self.elements,
                                    'polymer': self.polymer
})
if add_cols is not None:
if 'sequence_position' in add_cols:
                self.df['sequence_position'] = pd.Series(self.sequence_positions, index=self.df.index)
#!/usr/bin/env python
# coding: utf-8
# Pancancer_Aberrant_Pathway_Activity_Analysis scripts/viz/targene_cell_line_predictions.py
# # Cell Line Analysis
#
# We sought to validate the targene classifier trained on TCGA pan-cancer data by generating predictions on cell line data. A good classifier should generalize to predicting targene status in other samples. We apply the classifier on two datasets:
#
# 1. [Cancer Cell Line Encyclopedia (CCLE)](https://software.broadinstitute.org/software/cprg/?q=node/11) Gene Expression data.
# * 1020 cell lines with matching gene expression and mutation calls
# * Pharmacologic profiling of 24 drugs over 504 cell lines
# 2. GDSC data: These data were accessed via publicly available resources with help from links in the [UCSD-CCAL Onco-GPS github repository](https://github.com/UCSD-CCAL/onco-gps-paper-analysis)
# data from: "A landscape of pharmacogenomic interactions in cancer", Iorio F et al., Cell, 2016
# (https://www.cancerrxgene.org/gdsc1000/GDSC1000_WebResources//Data/preprocessed/Cell_line_RMA_proc_basalExp.txt.zip) Gene Expression data.
# 390/1000 cell lines with matching gene expression and mutation calls from CCLE data
# Pharmacologic profiling of ~500 drugs over 1000 cell lines from two different studies GDSC1 and GDSC2 datasets
# GDSC1_data: ftp://ftp.sanger.ac.uk/pub/project/cancerrxgene/releases/current_release/GDSC1_fitted_dose_response_15Oct19.xlsx
# GDSC2_data: ftp://ftp.sanger.ac.uk/pub/project/cancerrxgene/releases/current_release/GDSC2_fitted_dose_response_15Oct19.xlsx
# We replaced all GDSC cell line names with their CCLE counterparts for convenient processing.
import os
import sys
import numpy as np
import pandas as pd
from decimal import Decimal
from scipy.stats import ttest_ind
from statsmodels.stats.proportion import proportions_chisquare
from sklearn.preprocessing import StandardScaler
from Bio.SeqUtils import IUPACData
import matplotlib.pyplot as plt
import seaborn as sns
import plotnine as gg
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'papaa'))
from tcga_util import add_version_argument
# Store protein change dictionary
aa = IUPACData.protein_letters_1to3_extended
#get_ipython().run_line_magic('matplotlib', 'inline')
parser = argparse.ArgumentParser()
add_version_argument(parser)
parser.add_argument('-t', '--targenes', default= 'ERBB2_MUT,PIK3CA_MUT,KRAS_MUT,AKT1_MUT',
help='string of the genes to extract or gene list file')
parser.add_argument('-p', '--path_genes',
help='pathway gene list file')
parser.add_argument('-c', '--classifier_summary', default= None,
help='location of classifier_summary file')
parser.add_argument('-r', '--ccle_rnaseq',default= None,
help='path for ccle_rnaseq data file')
parser.add_argument('-m', '--ccle_mut',
help='path for ccle mutational data file')
parser.add_argument('-a', '--ccle_maf',
help='path for ccle variant data file')
parser.add_argument('-n', '--gdsc_rnaseq',default= None,
help='path for gdsc_rnaseq data file')
parser.add_argument('-u', '--gdsc_mut',
help='path for gdsc/ccle common mutational data file')
parser.add_argument('-e', '--gdsc1_phar',
help='path for GDSC1 pharmacological data file')
parser.add_argument('-f', '--gdsc2_phar',
help='path for GDSC2 pharmacological data file')
args = parser.parse_args()
# Load PI3K_gain Classifier Coefficients
# classifier_file = os.path.join('..', 'classifiers', 'ERBB2_PIK3CA_KRAS_AKT1', 'classifier_summary.txt')
# with open(classifier_file) as class_fh:
# for line in class_fh:
# line = line.strip().split('\t')
# if line[0] == 'Coefficients:':
# all_coef_df = pd.read_table(os.path.join('..', line[1]), index_col=0)
# Only non-zero coefficients contribute to model performance
classifier = args.classifier_summary
classifier_file = os.path.join(classifier , "classifier_summary.txt")
all_coef_df = pd.read_table(os.path.join( classifier , "classifier_coefficients.tsv"), index_col=0)
coef_df = all_coef_df[all_coef_df['abs'] > 0]
coef_df.head(10)
# ## Part 1: CCLE
#
# Note - This data was also retrieved from the Onco-GPS paper analysis repository
#ccle_file_name = os.path.join('..', '..', 'onco-gps-paper-analysis', 'data',
# 'rpkm__gene_x_ccle_cellline.gct')
ccle_file_name = args.ccle_rnaseq
ccle_df = pd.read_table(ccle_file_name, skiprows=2, index_col=0)
#ccle_df = ccle_df.drop_duplicates(subset='Description',keep = 'first')
ccle_df = ccle_df[~ccle_df.index.duplicated()]
# Subset to common genes in the classifier and CCLE data
common_genes = list(set(coef_df['feature']) & set(ccle_df.index))
common_ccle_coef = coef_df[coef_df['feature'].isin(common_genes)]
ccle_df = ccle_df.loc[common_ccle_coef['feature'], ccle_df.columns[1:]]
scaled_fit = StandardScaler().fit(ccle_df.T)
ccle_df = pd.DataFrame(scaled_fit.transform(ccle_df.T),
index=ccle_df.columns,
columns=ccle_df.index)
ccle_df = ccle_df.T
# Get the weights ready for applying the classifier
apply_weights = pd.DataFrame(common_ccle_coef['weight'])
apply_weights.index = common_ccle_coef.feature
# Apply a logit transform [y = 1/(1+e^(-wX))] to output probabilities
result_ccle = apply_weights.T.dot(ccle_df)
result_ccle = 1 / (1 + np.exp(-1 * result_ccle))
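# Note (added for clarity): the two lines above compute the logistic sigmoid of the linear
# scores, i.e. the same quantity as scipy.special.expit(apply_weights.T.dot(ccle_df)); the
# explicit 1 / (1 + exp(-x)) form is kept to match the rest of the pipeline.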
# Distribution of predictions of the Targene Classifier applied to CCLE data
result_ccle.T.hist();
r = os.path.join(classifier,'figures','ccle_histogram.png')
plt.savefig(r)
plt.close()
# Load CCLE Mutation Data
#ccle_mut_file_name = os.path.join('..', '..', 'onco-gps-paper-analysis', 'data',
# 'mutation__gene_x_ccle_cellline.gct')
ccle_mut_file_name = args.ccle_mut
ccle_all_mut_df = pd.read_table(ccle_mut_file_name, skiprows=2, index_col=0)
# Load CCLE Variant Data
#ccle_maf_file = 'https://data.broadinstitute.org/ccle/CCLE_DepMap_18Q1_maf_20180207.txt'
ccle_maf_file = args.ccle_maf
ccle_maf_df = pd.read_table(ccle_maf_file, index_col=15)
targenes = args.targenes.split(',')
targene_status = ccle_all_mut_df.loc[targenes, :].T.apply(max, axis=1)
# Identify all cell lines with mutations in given targenes
ccle_mut_df = (
ccle_all_mut_df.loc[targenes, :].T
.assign(targene_status=targene_status).drop(['Description'])
)
# Join classifier scores with mutation status
ccle_full_df = ccle_mut_df.join(result_ccle.T).dropna()
ccle_full_df = ccle_full_df.assign(sample_name = ccle_full_df.index)
ccle_full_df = ccle_full_df.sort_values(by='weight', ascending=False)
ccle_full_df.index.name = 'cell_line'
# Write CCLE Scores to file
results_folder = os.path.join(classifier, 'results')
if not os.path.exists(results_folder):
os.makedirs(results_folder)
ccle_scores_file = os.path.join(classifier, 'results', 'ccle_targene_classifier_scores.tsv')
ccle_full_df.to_csv(ccle_scores_file, sep='\t')
# ### Perform a t-test on classifier weights across groups
# targene mutant vs. targene wildtype
targene_mutant = ccle_full_df[ccle_full_df['targene_status'] == 1]
targene_wt = ccle_full_df[ccle_full_df['targene_status'] == 0]
# Output t-test results
t_results_ccle_targene = ttest_ind(a = targene_mutant['weight'],
b = targene_wt['weight'], equal_var = False)
print('targene Status:')
print(t_results_ccle_targene)
# Use Seaborn for the 2nd plot
import seaborn as sns
import matplotlib.pyplot as pl
sns.set_style("whitegrid")
sns.set_context("paper", rc={"font.size":11, "axes.titlesize":11, "axes.labelsize":16,
'xtick.labelsize':11, 'ytick.labelsize':11, 'figure.facecolor': 'white'})
cell_line_folder = os.path.join(classifier, 'figures', 'cell_line')
if not os.path.exists(cell_line_folder):
os.makedirs(cell_line_folder)
# Plot Results for targene alone
x1, x2 = 0, 1
y1, y2,h = 1.05, 1.0, 0.03
plt.rcParams['figure.figsize']=(3.5, 4)
ax2 = sns.boxplot(x="targene_status", y="weight", data=ccle_full_df,
palette = {0: "lightgreen",1: 'yellow'},
fliersize=0)
ay2 = sns.stripplot(x='targene_status', y='weight', data=ccle_full_df,
dodge=False,
palette = {0: "blue", 1: 'red'},
jitter=0.12, size=2, alpha=0.65)
ax2.axes.set_ylim(0, 1.2)
ax2.set_yticklabels([0, 0.2, 0.4, 0.6, 0.8, 1,''])
ax2.set_xticklabels(['Targene_WT', 'Targene_Mut'])
ax2.set_ylabel('Targene Classifier Score')
ax2.set_xlabel('CCLE Data')
ax2.legend
pl.axhline(0.5, color='black', linestyle='dashed', linewidth=1)
ay2.axes.set_ylim(0, 1.2)
# Add targene T-Test Results
pl.plot([x1, x1, x2, x2], [y1, y1+h, y1+h, y1], lw=1.2, c='black')
pl.text(.6, y1+h, "{:.2E}".format(Decimal(t_results_ccle_targene.pvalue)),
ha='center', va='bottom', color="black")
pl.tight_layout()
ccle_fig_file = os.path.join(classifier, 'figures', 'cell_line', 'ccle_targene_WT_MUT_predictions.pdf')
pl.savefig(ccle_fig_file)
plt.close()
# ### What percentage of correct classifications in CCLE data?
# Assign a label to what the predictions are given classifier scores
ccle_full_df = ccle_full_df.assign(predictions = 'wild-type')
ccle_full_df.loc[ccle_full_df['weight'] > 0.5, 'predictions'] = 'mutant'
# Stratify cell lines based on predictions and ground truth status
positive_targene_predictions_ccle = ccle_full_df[ccle_full_df['weight'] > 0.5]
negative_targene_predictions_ccle = ccle_full_df[ccle_full_df['weight'] <= 0.5]
positive_targene_lines_ccle = ccle_full_df[ccle_full_df['targene_status'] == 1]
negative_targene_lines_ccle = ccle_full_df[ccle_full_df['targene_status'] == 0]
# Of wild-type targene cell lines, how many are predicted correctly?
# True Negative Rate, Specificity
negative_targene_lines_ccle['predictions'].value_counts()
# Of mutated targene cell lines, how many are predicted correctly?
# True Positive Rate (TPR), Recall, Sensitivity
positive_targene_lines_ccle['predictions'].value_counts()
# Of the wild-type predictions, how many are actually wild-type?
# Negative Predictive Value (NPV)
neg_ccle_results = negative_targene_predictions_ccle['targene_status'].value_counts()
true_neg = neg_ccle_results[0]
predicted_condition_neg = neg_ccle_results.sum()
print('{} out of {} Targene wild-type predictions '
'are true ({:.1f}%)'.format(true_neg, predicted_condition_neg,
true_neg * 100 / predicted_condition_neg))
# Of the mutated predictions, how many are actually mutated?
# Positive Predictive Value (PPV) -or- precision
pos_ccle_results = positive_targene_predictions_ccle['targene_status'].value_counts()
false_pos, true_pos = pos_ccle_results
predicted_condition_pos = pos_ccle_results.sum()
print('{} out of {} Targene mutation predictions '
'are true ({:.1f}%)'.format(true_pos, predicted_condition_pos,
true_pos * 100 / predicted_condition_pos))
total_correct = true_pos + true_neg
print('{} of {} Total cell lines '
'predicted correctly ({:.1f}%)'.format(total_correct, ccle_full_df.shape[0],
total_correct * 100 / ccle_full_df.shape[0]))
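# Optional cross-check (illustrative only, not part of the original analysis): the same counts
# can be recovered from a confusion matrix, assuming sklearn is available in this environment.
# from sklearn.metrics import confusion_matrix
# tn, fp, fn, tp = confusion_matrix(ccle_full_df['targene_status'].astype(int),
#                                   (ccle_full_df['weight'] > 0.5).astype(int)).ravel()
# assert (tn, tp) == (true_neg, true_pos)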
# ### Add CCLE Variant Scores (nucleotide and amino acid) to Supplementary Data Files
# Load TCGA PanCanAtlas Core targene Pathway genes
path_genes_file = args.path_genes
path_core_df = pd.read_table(path_genes_file)
import os
import random
import logging
import torch
import re
import json
import locale
import unidecode
import string
import numpy as np
import pandas as pd
import seaborn as sns
from IPython.display import Markdown, display
from operator import itemgetter
from datetime import datetime, timedelta
from random import random, randint, choice, seed
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm
from faker import Faker
from transformers import get_linear_schedule_with_warmup
from transformers import AutoConfig, AutoModelForTokenClassification, AutoTokenizer
from sklearn.metrics import precision_score as sk_precision_score, recall_score as sk_recall_score, \
f1_score as sk_f1_score, confusion_matrix as sk_confusion_matrix
from utils import convert_to_features, switch_entity, find_sub_list, get_text_and_labels
from focal_loss import FocalLoss
class Ner:
def __init__(self, _inputs, log_level=logging.INFO):
self.logger = logging.getLogger(__name__)
self.log_level = log_level
self.inputs = _inputs
self.pad_token_label_id = CrossEntropyLoss().ignore_index
self.set_seed(_inputs["seed"])
self.list_entities = _inputs["entities"]
self.underlines = {
ent: '#%02x%02x%02x' % (int(sns.color_palette('pastel', len(self.list_entities))[i][0] * 255),
int(sns.color_palette('pastel', len(self.list_entities))[i][1] * 255),
int(sns.color_palette('pastel', len(self.list_entities))[i][2] * 255))
for i, ent in enumerate(self.list_entities)}
self.list_regex = _inputs["regex"]
self.max_seq_length = _inputs["max_seq_length"]
self.per_gpu_batch_size = _inputs["per_gpu_batch_size"]
self.model_path = _inputs["model_path"]
self.tokenizer_path = _inputs["tokenizer_path"]
self.labels_format = _inputs["labels_format"]
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # Parameter initialization
self.adam_epsilon, self.learning_rate, self.max_steps, self.gradient_accumulation_steps, self.num_train_epochs,\
self.max_grad_norm, self.warmup_steps, self.weight_decay, self.white_space_token, self.loss_function, \
self.output_dir = [None] * 11
def evaluate_model(self, corpus):
"""
        Evaluates the model for the specified entities.
        :param corpus: DataFrame of the corpus to use for evaluation
"""
# Loading labels
labels, labels_weight = self.load_labels(None)
# Loading model and tokenizer
model, tokenizer = self.load_model_and_tokenizer(labels)
# Evaluation
eval_dataset = self.load_and_cache_texts(corpus, tokenizer, labels)
# Save config and logs
self.save_config_and_logs()
model.to(self.device)
result, _ = self.run_predict_and_eval(eval_dataset, model, tokenizer, labels, self.model_path)
def evaluate_and_display_results(self, eval_loss, real_labels, predicted_labels, labels, no_saving, model_file):
"""
        Evaluates a model's performance and saves the results in the model folder.
        :param eval_loss: mean value of the loss function
        :param real_labels: list of labels corresponding to the text tokens
        :param predicted_labels: list of labels predicted by the algorithm
        :param labels: list of the possible labels
        :param no_saving: boolean indicating whether the evaluation results should be saved or not
        :param model_file: path to the evaluated model
        :return: evaluation results as a dictionary
"""
# Computes metrics
results = self.get_scores(real_labels, predicted_labels, labels, eval_loss)
# Displays results and saves them to a file
# for key in sorted(results.keys()):
# self.logger.info(" %s = %s", key, str(results[key]))
self.logger.info("1. results by entity\n")
for ent in self.list_entities:
end_of_line = "\n" if ent == self.list_entities[-1] else ""
self.logger.info("\t%s : %s%s", ent, str(results[ent]), end_of_line)
self.logger.info("2. global results\n")
other_keys = set(results.keys()) - set(self.list_entities) - {"confusion_matrix"}
for key in other_keys:
end_of_line = "\n" if key == list(other_keys)[-1] else ""
self.logger.info("\t%s = %s%s", key, str(results[key]), end_of_line)
self.logger.info("3. confusion matrix\n")
self.logger.info("\t%s\n", str(results["confusion_matrix"]))
# Saves results
if not no_saving:
output_eval_file = model_file.replace('.pt', '_eval_results.txt')
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def extract_info_from_batch(self, tokenizer, batch, _output_probabilities, label_map, threshold=None):
"""
        Extracts the various pieces of information contained in a data batch.
        :param tokenizer: model tokenizer
        :param batch: data batch
        :param _output_probabilities: class probabilities output by the algorithm
        :param label_map: mapping between label identifiers and label names
        :param threshold: thresholds associated with each class. If the output probability exceeds this threshold,
        the class is considered predicted even if it is not the highest probability.
        :return: files, text parts, text tokens, real labels, predicted labels and label probabilities
"""
token_2_ignore = [tokenizer.sep_token, tokenizer.cls_token, tokenizer.pad_token]
token_ids_2_ignore = [tokenizer.sep_token_id, tokenizer.cls_token_id, tokenizer.pad_token_id]
# Extract texts and predicted labels
text_tokens = [[tokenizer.convert_ids_to_tokens(int(x)) for x in y] for y in batch[0]]
labels_probabilities = _output_probabilities.detach().to("cpu").numpy()
predicted_labels_ids = np.argmax(labels_probabilities, axis=2)
# Using manual threshold
if threshold is not None:
for i, row in enumerate(labels_probabilities):
for j, token in enumerate(row):
if any([x >= threshold[ind] for ind, x in enumerate(token)][1:]) and np.argmax(token) == 0:
_rescaled_tokens = [x if (ind != 0) and (x >= threshold[ind]) else -1000 for ind, x in
enumerate(token)]
predicted_labels_ids[i][j] = np.argmax(_rescaled_tokens)
predicted_labels = [[label_map[x] for x in y] for y in predicted_labels_ids]
# Delete functional tokens
labels_probabilities = [[", ".join([str(z) for z in y]) for y in x] for x in labels_probabilities]
_joined = [[(x, y, z) for x, y, z in zip(text_tokens[i], predicted_labels[i], labels_probabilities[i]) if
x not in token_2_ignore]
for i in range(len(text_tokens))]
_valid_examples = [i for i, x in enumerate(_joined) if len(x) > 0]
_joined = [list(zip(*_joined[i])) for i in _valid_examples]
text_tokens = [list(x[0]) for x in _joined]
predicted_labels = [list(x[1]) for x in _joined]
labels_probabilities = [list(x[2]) for x in _joined]
# Extract real labels
real_labels = [[label_map[int(x)] for x in y if x != self.pad_token_label_id] for y in batch[3]]
real_labels = [x for i, x in enumerate(real_labels) if i in _valid_examples]
# Extract file names
file_tokens = [[tokenizer.convert_ids_to_tokens(int(x)) for x in y if x not in token_ids_2_ignore] for y in
batch[4]]
files = ["".join([x.replace('▁', ' ') for x in y]).strip() for y in file_tokens]
files = [x for i, x in enumerate(files) if i in _valid_examples]
# Extract text part
text_parts = [int(x) for x in batch[5]]
text_parts = [x for i, x in enumerate(text_parts) if i in _valid_examples]
return files, text_parts, text_tokens, real_labels, predicted_labels, labels_probabilities
def find_regex_entities(self, corpus):
"""
        Detects the entities matched by regular expressions and replaces the corresponding tags with the
        appropriate label.
        :param corpus: text corpus
        :return: corpus with updated labels
"""
func_dic = {"TIME": self.regex_time, "PHONE": self.regex_phone, "IMMAT": self.regex_immat,
"EMAIL": self.regex_email}
for regex in self.list_regex:
corpus = corpus.apply(lambda x: func_dic[regex](x), axis=1)
return corpus
@staticmethod
def get_pos_class_freq(train_df):
"""
        Computes the weight vector for the last layer of the network, after the encoder. The weight of each output
        class is inversely proportional to the class frequency in the training dataset.
        :param train_df: DataFrame of the training corpus
        :return: dictionary mapping each class to its weight
"""
count_df = pd.Series([y for x in train_df.labels.values for y in x]).value_counts().reset_index()
return {e[0]: e[1] for e in count_df[['index', 0]].values}
def get_scores(self, real_labels, predicted_labels, labels, eval_loss):
"""
        Computes the model performance (f1, recall and precision) overall and for each entity.
        :param real_labels: list of labels corresponding to the text tokens
        :param predicted_labels: list of labels predicted by the algorithm
        :param labels: list of the possible labels
        :param eval_loss: mean value of the loss function
        :return: dictionary of the algorithm's performance metrics.
"""
_s_labels = list(sorted(labels))
_flat_real_labels = [x for y in real_labels for x in y]
_flat_predicted_labels = [x for y in predicted_labels for x in y]
_flat_real_labels_type_only = [x.split("-")[-1] for y in real_labels for x in y]
_flat_predicted_labels_type_only = [x.split("-")[-1] for y in predicted_labels for x in y]
_labels_type_only = list(set([x.split("-")[-1] for x in labels if x != 'O']))
cm = sk_confusion_matrix(_flat_real_labels, _flat_predicted_labels, labels=_s_labels)
cm = np.concatenate((np.transpose(np.array([[''] + _s_labels])), np.concatenate((np.array([_s_labels]),
cm), axis=0)), axis=1)
results = {
"loss": eval_loss,
"precision (entity type only)": sk_precision_score(_flat_real_labels_type_only,
_flat_predicted_labels_type_only,
labels=_labels_type_only, average='micro',
zero_division=0),
"precision (BIO labels)": sk_precision_score(_flat_real_labels, _flat_predicted_labels,
labels=[x for x in labels if x != "O"], average='micro',
zero_division=0),
"recall (entity type only)": sk_recall_score(_flat_real_labels_type_only, _flat_predicted_labels_type_only,
labels=_labels_type_only, average='micro', zero_division=0),
"recall (BIO labels)": sk_recall_score(_flat_real_labels, _flat_predicted_labels,
labels=[x for x in labels if x != "O"], average='micro',
zero_division=0),
"f1 (entity type only)": sk_f1_score(_flat_real_labels_type_only, _flat_predicted_labels_type_only,
labels=_labels_type_only, average='micro', zero_division=0),
"f1 (BIO labels)": sk_f1_score(_flat_real_labels, _flat_predicted_labels,
labels=[x for x in labels if x != "O"], average='micro', zero_division=0),
"confusion_matrix": cm
}
for ent in self.list_entities:
_preds = [1 if x == ent else 0 for x in _flat_predicted_labels_type_only]
_reals = [1 if x == ent else 0 for x in _flat_real_labels_type_only]
results[ent] = f"precision: {sk_precision_score(_reals, _preds, zero_division=0)}, " \
f"recall: {sk_recall_score(_reals, _preds, zero_division=0)}"
return results
def get_corpus_stats(self, corpus):
"""
        Adds the characteristics of the processed corpus to the logs.
        :param corpus: DataFrame of the text corpus
"""
_global = f"{len(corpus)} textes dans le corpus, soit {sum([len(x) for x in corpus.text.tolist()])} tokens.\n"
_per_entity = "Nombre d'entités:\n"
for ent in self.list_entities:
_per_entity += f"\t- {ent} : {[x for y in corpus.labels.tolist() for x in y].count(ent)}\n"
self.logger.info("%s\n%s", _global, _per_entity)
def load_model_and_tokenizer(self, labels):
"""
        Loads the model and its associated tokenizer.
        :param labels: list of the possible labels
        :return: model and tokenizer
"""
if self.model_path.endswith(".pt"):
if self.device.type == "cpu":
model = torch.load(self.model_path, map_location=torch.device('cpu'))
else:
model = torch.load(self.model_path)
tokenizer_path = os.path.join(self.tokenizer_path, model.__class__.__name__)
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
self.white_space_token = tokenizer.tokenize("le")[0].replace("le", "")
else:
config_file = os.path.join(self.model_path, "config.json")
config = AutoConfig.from_pretrained(config_file)
config.num_labels = len(labels)
model = AutoModelForTokenClassification.from_config(config)
tokenizer = AutoTokenizer.from_pretrained(self.model_path)
self.white_space_token = tokenizer.tokenize("le")[0].replace("le", "")
return model, tokenizer
def load_labels(self, train_df):
"""
        Generates the labels according to the chosen format, together with their weights computed from their
        frequency of occurrence in the corpus.
        :param train_df: training text corpus
        :return: list of labels and corresponding weights
"""
if self.labels_format == "BIO":
labels = ["O"] + [y for z in [[f"B-{x}", f"I-{x}"] for x in self.list_entities] for y in z]
else:
labels = ["O"] + self.list_entities
        # The label weights are computed from their frequency of occurrence.
if (train_df is None) or (len(train_df) == 0):
labels_weights = [1 for _ in labels]
            # For prediction only, a constant weight vector is sufficient
else:
freqs = self.get_pos_class_freq(train_df)
labels_weights = np.array([freqs.get(key, None) for key in labels], dtype=np.float64)
labels_weights = [np.nanmax(labels_weights) / x if not np.isnan([x]) else np.nanmax(labels_weights) for x in
labels_weights]
labels_weights = [np.log(x) if x != 1 else x for x in labels_weights]
labels_weights = torch.tensor(labels_weights).float()
labels_weights = labels_weights.to(device=self.device)
return labels, labels_weights
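    # Worked example (illustrative only): with label counts {"O": 900, "B-PER": 90, "I-PER": 10},
    # the inverse-frequency weights are max/count = [1, 10, 90], and the log damping above turns
    # them into [1, log(10) ≈ 2.3, log(90) ≈ 4.5], so rare classes are up-weighted without
    # overwhelming the loss.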
def load_and_cache_texts(self, corpus_df, tokenizer, labels):
"""
        Loads the corpus texts into a TensorDataset.
        :param corpus_df: DataFrame of the text corpus
        :param tokenizer: tokenizer associated with the predictive model
        :param labels: list of the possible labels
        :return: TensorDataset of the corpus
"""
tokenizer_special_tokens = {"cls_token": tokenizer.cls_token, "cls_token_segment_id": 0,
"sep_token": tokenizer.sep_token, "sep_token_extra": False,
"pad_on_left": False, "cls_token_at_end": False,
"pad_token": tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
"pad_token_segment_id": 0, "pad_token_label_id": self.pad_token_label_id,
"sequence_a_segment_id": 0, "mask_padding_with_zero": True}
features = convert_to_features(corpus_df, labels, self.max_seq_length, tokenizer, tokenizer_special_tokens)
# Convert to Tensors and build dataset
all_text_token_ids = torch.tensor([f.text_token_ids for f in features], dtype=torch.long)
all_text_mask = torch.tensor([f.text_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
all_file_token_ids = torch.tensor([f.file_token_ids for f in features], dtype=torch.long)
all_text_parts_ids = torch.tensor([f.text_part_index for f in features], dtype=torch.long)
dataset = TensorDataset(all_text_token_ids, all_text_mask, all_segment_ids, all_label_ids, all_file_token_ids,
all_text_parts_ids)
return dataset
def loss_with_weights(self, labels, attention_mask, preds, labels_weights):
"""
        Computes the loss function (Focal loss or Cross Entropy loss), taking into account the weights associated
        with each class.
        :param labels: labels associated with each token
        :param attention_mask: attention mask
        :param preds: algorithm predictions for each token
        :param labels_weights: weights associated with each class
        :return: loss
"""
loss = None
if labels is not None:
if self.loss_function == "FocalLoss":
loss_fct = FocalLoss(alpha=labels_weights, gamma=2)
else:
loss_fct = CrossEntropyLoss(labels_weights)
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = preds.view(-1, len(labels_weights))
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(preds.view(-1, len(labels_weights)), labels.view(-1))
return loss
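    # Note (added for clarity): the standard focal loss with gamma=2 scales the weighted cross
    # entropy term by (1 - p_t)^2, where p_t is the predicted probability of the true class, so
    # well-classified tokens contribute less than with plain CrossEntropyLoss (assuming the local
    # FocalLoss module follows that usual definition).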
def parse_input_json(self):
"""
        Reads the parameters from the json input file and assigns them to class attributes.
"""
try:
            # Training-related configuration parameters (optional for prediction or evaluation)
self.adam_epsilon = self.inputs["adam_epsilon"]
self.learning_rate = self.inputs["learning_rate"]
self.max_steps = self.inputs["max_steps"]
self.gradient_accumulation_steps = self.inputs["gradient_accumulation_steps"]
self.num_train_epochs = self.inputs["num_train_epochs"]
self.max_grad_norm = self.inputs["max_grad_norm"]
self.warmup_steps = self.inputs["warmup_steps"]
self.weight_decay = self.inputs["weight_decay"]
self.loss_function = self.inputs["loss_function"]
except (Exception,):
_mandatory_parameters = ["adam_epsilon", "learning_rate", "max_seq_length", "max_steps",
"gradient_accum, _steps", "num_train_epochs", "max_grad_norm",
"per_gpu_batch_size", "warmup_steps", "weight_decay",
"loss_function", "output_dir"]
_missing_ones = [x for x in _mandatory_parameters if x not in self.inputs.keys()]
self.logger.error(f"Missing training parameter(s): {_missing_ones}")
def predict_with_model(self, corpus, threshold):
"""
        Detects the requested named entities in a given corpus.
        :param corpus: DataFrame of the text corpus
        :param threshold: manual detection thresholds. If an entity's probability exceeds this threshold, the
        entity is predicted even if it is not the highest probability.
        :return: corpus DataFrame enriched with the annotations
"""
# Loading labels
labels, labels_weight = self.load_labels(None)
# Loading model and tokenizer
model, tokenizer = self.load_model_and_tokenizer(labels)
# Evaluation
predict_dataset = self.load_and_cache_texts(corpus, tokenizer, labels)
model.to(self.device)
_, processed_corpus = self.run_predict_and_eval(predict_dataset, model, tokenizer, labels, None,
no_evaluation=True, threshold=threshold)
return processed_corpus
@staticmethod
def regex_immat(row):
"""
Finds immats in texts using REGEX rules.
:param row: DataFrame row
:return: pd.Series with file, text and label columns
"""
# Loads text
raw_ppel = row["raw_text"]
# REGEX immat patterns and exceptions
regex_pattern = r"[\s\"\''\(\,\.][a-zA-Z]{2}[\s\.-]?[0-9]{3}[\s\.-]?[a-zA-Z]{2}[\s\"\''\)\,\.]"
exceptions = ['de', 'et', 'le', 'go']
# Finds immat patterns
plaque = []
for _immat in re.finditer(regex_pattern, raw_ppel):
s = _immat.start()
e = _immat.end()
if not ((raw_ppel[s + 1:s + 3] in exceptions) and (raw_ppel[e - 3:e - 1] in exceptions)):
plaque.append(raw_ppel[s + 1:e - 1])
# Creates labels
splitted_text = row["text"]
if "predicted_labels" in row.keys():
bio_tags = row["predicted_labels"]
else:
bio_tags = ["O" for _ in row["labels"]]
plaque = [x.split(" ") for x in plaque]
_ppel = splitted_text.copy()
for _immat in plaque:
ind = find_sub_list(_immat, _ppel)
if ind is None:
ind = find_sub_list(_immat, _ppel, strict=False)
if ind is None:
print(f"entity {_immat} not found in text")
continue
for i, _tag in zip(ind, _immat):
bio_tags[i] = 'IMMAT'
_ppel = [None for _ in _ppel[:ind[0] + len(_immat)]] + _ppel[min(len(_ppel), ind[0] + len(_immat)):]
return pd.Series(
{"file": row["file"], "raw_text": row["raw_text"], "text": splitted_text, "labels": row["labels"],
"predicted_labels": bio_tags})
@staticmethod
def regex_email(row):
"""
Finds e-mails in texts using REGEX rules.
:param row: DataFrame row
:return: pd.Series with file, text and label columns
"""
# Loads text
raw_ppel = row["raw_text"]
# REGEX time patterns
regex_pattern = r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)"
# Finds e-mail patterns
emails = []
for _mail in re.finditer(regex_pattern, raw_ppel):
s = _mail.start()
e = _mail.end()
if raw_ppel[e - 1] == '.':
emails.append(raw_ppel[s:e - 1])
else:
emails.append(raw_ppel[s:e])
# Creates labels
splitted_text = row["text"]
if "predicted_labels" in row.keys():
bio_tags = row["predicted_labels"]
else:
bio_tags = ["O" for _ in row["labels"]]
emails = [x.split(" ") for x in emails]
_ppel = splitted_text.copy()
for _mail in emails:
ind = find_sub_list(_mail, _ppel, strict=False)
if ind is None:
print(f"entity {_mail} not found in text")
continue
for i, _tag in zip(ind, _mail):
bio_tags[i] = 'EMAIL'
_ppel = [None for _ in _ppel[:ind[0] + len(_mail)]] + _ppel[min(len(_ppel), ind[0] + len(_mail)):]
return pd.Series(
{"file": row["file"], "raw_text": row["raw_text"], "text": splitted_text, "labels": row["labels"],
"predicted_labels": bio_tags})
@staticmethod
def regex_phone(row):
"""
Finds phone numbers in texts using REGEX rules.
:param row: DataFrame row
:return: pd.Series with file, text and label columns
"""
# Loads text
raw_ppel = row["raw_text"]
# REGEX time patterns
regex_pattern = [
r"[\s\"\''\(\,\.]0[0-9][\s\.-]?([0-9]{2}[\s\.-]?){3}[0-9]{2}[\s\"\''\)\,\.]",
r"[\s\"\''\(\,\.]\+[0-9]{1,4}[\s\.-]?[0-9][\s\.-]?([0-9]{2}[\s\.-]?){3}[0-9]{2}[\s\"\''\)\,\.]",
r"[\s\"\''\(\,\.][0-9]{4}[\s\.-][0-9]{3}[\s\.-][0-9]{3}[\s\"\''\)\,\.]"
]
# Finds phone number patterns
phones = []
for pattern in regex_pattern:
for _phone in re.finditer(pattern, raw_ppel):
s = _phone.start() + 1
e = _phone.end() - 1
phones.append((s, raw_ppel[s:e].strip()))
phones.sort(key=itemgetter(0))
phones = [x[1] for x in phones]
# Creates labels
splitted_text = row["text"]
if "predicted_labels" in row.keys():
bio_tags = row["predicted_labels"]
else:
bio_tags = ["O" for _ in row["labels"]]
phones = [x.split(" ") for x in phones]
_ppel = splitted_text.copy()
for _phone in phones:
ind = find_sub_list(_phone, _ppel)
if ind is None:
ind = find_sub_list(_phone, _ppel, strict=False)
if ind is None:
print(f"entity {_phone} not found in text")
continue
for i, _tag in zip(ind, _phone):
bio_tags[i] = 'PHONE'
_ppel = [None for _ in _ppel[:ind[0] + len(_phone)]] + _ppel[min(len(_ppel), ind[0] + len(_phone)):]
return pd.Series(
{"file": row["file"], "raw_text": row["raw_text"], "text": splitted_text, "labels": row["labels"],
"predicted_labels": bio_tags})
@staticmethod
def regex_time(row):
"""
Finds times in texts using REGEX rules.
:param row: DataFrame row
:return: pd.Series with file, text and label columns
"""
# Loads text
raw_ppel = row["raw_text"]
# REGEX time patterns
regex_pattern = [r"[0-9][0-9]?[\:][0-9][0-9]?", r"[0-9][0-9]?[Hh][0-9]?[0-9]?",
r"[0-9][0-9]?\s[hH][eE][uU][rR][eE][s]?\s[0-9]?[0-9]?",
r"[0-9][0-9]?\s[Hh]\s[0-9]?[0-9]?"]
# Finds time patterns
times = []
for pattern in regex_pattern:
for _time in re.finditer(pattern, raw_ppel):
s = _time.start()
e = _time.end()
times.append((s, raw_ppel[s:e].strip()))
times.sort(key=itemgetter(0))
times = [x[1] for x in times]
# Creates labels
splitted_text = row["text"]
if "predicted_labels" in row.keys():
bio_tags = row["predicted_labels"]
else:
bio_tags = ["O" for _ in row["labels"]]
times = [x.split(" ") for x in times]
_ppel = splitted_text.copy()
for _time in times:
ind = find_sub_list(_time, _ppel)
if ind is None:
ind = find_sub_list(_time, _ppel, strict=False)
if ind is None:
print(f"entity {_time} not found in text")
continue
for i, _tag in zip(ind, _time):
bio_tags[i] = 'TIME'
_ppel = [None for _ in _ppel[:ind[0] + len(_time)]] + _ppel[min(len(_ppel), ind[0] + len(_time)):]
return pd.Series(
{"file": row["file"], "raw_text": row["raw_text"], "text": splitted_text, "labels": row["labels"],
"predicted_labels": bio_tags})
def run_predict_and_eval(self, dataset, model, tokenizer, labels, save_folder, no_evaluation=False, no_saving=False,
threshold=None):
"""
        Runs the model on a dataset, optionally computes evaluation metrics, and returns the predictions.
        :param dataset: TensorDataset to run through the model
        :param model: token-classification model
        :param tokenizer: tokenizer associated with the model
        :param labels: list of the possible labels
        :param save_folder: path used to name the evaluation results file (may be None)
        :param no_evaluation: if True, skip the metric computation
        :param no_saving: if True, do not write the evaluation results to disk
        :param threshold: optional manual decision thresholds per entity
        :return: evaluation results (or None) and a DataFrame of the predictions
"""
batch_size = self.per_gpu_batch_size
sampler = SequentialSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size)
label_map = {i: label for i, label in enumerate(labels)}
if threshold is not None:
threshold = {ind: threshold[ent] if ent in threshold.keys() else 1000 for ind, ent in label_map.items()}
processed_corpus = pd.DataFrame()
eval_loss = 0.0
nb_eval_steps = 0
model.to(self.device)
model.eval()
for batch in tqdm(dataloader, desc="Evaluating", position=0, leave=True):
batch = tuple(t.to(self.device) for t in batch)
with torch.no_grad():
_inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3],
"token_type_ids": None}
outputs = model(**_inputs)
tmp_eval_loss, _output_probabilities = outputs[:2]
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
files, text_parts, text_tokens, real_labels, predicted_labels, labels_probabilities = \
self.extract_info_from_batch(tokenizer, batch, _output_probabilities, label_map, threshold)
processed_corpus = pd.concat([processed_corpus, pd.DataFrame({"file": files, "text_part": text_parts,
"text": text_tokens,
"labels": real_labels,
"predicted_labels": predicted_labels,
"labels_probabilities":
labels_probabilities})])
# Evaluate results
if (not no_evaluation) & (len(processed_corpus) > 0):
eval_loss = eval_loss / nb_eval_steps if nb_eval_steps else 0
results = self.evaluate_and_display_results(eval_loss, processed_corpus["labels"].tolist(),
processed_corpus["predicted_labels"].tolist(),
labels, no_saving, save_folder)
else:
results = None
return results, processed_corpus.reset_index(drop=True)
def run_training(self, train_dataset, test_dataset, model, tokenizer, labels, labels_weights):
"""
Train a transformer model.
        :param train_dataset: TensorDataset used for training
        :param test_dataset: TensorDataset used for evaluation at the end of each epoch
        :param model: token-classification model to train
        :param tokenizer: tokenizer associated with the model
        :param labels: list of the possible labels
        :param labels_weights: class weights used by the loss function
        :return: path to the saved model
"""
train_batch_size = self.per_gpu_batch_size
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=train_batch_size)
t_total = len(train_dataloader) // self.gradient_accumulation_steps * self.num_train_epochs
# Initializing optimizer
optimizer, scheduler = self.set_scheduler_and_optimizer(model, t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
model.to(self.device)
for step in range(int(self.num_train_epochs)):
self.logger.info(f"############ EPOCH : {step + 1} / {self.num_train_epochs} ############\n")
epoch_iterator = tqdm(train_dataloader, desc="Iteration", position=0, leave=True)
for batch in epoch_iterator:
                # The training step itself does not happen here; this simply switches the model to training mode
model.train()
batch = tuple(t.to(self.device) for t in batch)
_inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3],
"token_type_ids": None}
                # Calls the forward function of the RobertaForTokenClassification class
outputs = model(**_inputs)
loss = self.loss_with_weights(batch[3], batch[1], outputs[1], labels_weights)
loss.backward()
tr_loss += loss.item()
if (step + 1) % self.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), self.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if 0 < self.max_steps < global_step:
epoch_iterator.close()
break
            # Evaluate the model performance at the end of each epoch
self.run_predict_and_eval(test_dataset, model, tokenizer, labels, None, no_saving=True)
        # Save the final model and delete the checkpoints
save_path = os.path.join(self.output_dir, f"{model.__class__.__name__}.pt")
self.save_model(save_path, model)
return save_path
@staticmethod
def save_model(save_path, model):
torch.save(model, save_path)
def save_config_and_logs(self):
        # Export the log and json files
self.output_dir = os.path.join(self.inputs["output_dir"], f"{datetime.now().strftime('%m_%d_%Y_%H%M%S')}")
os.mkdir(self.output_dir)
_log_file = os.path.join(self.output_dir, "log.txt")
logging.basicConfig(filename=_log_file, level=self.log_level,
format='%(asctime)s %(name)s %(levelname)s:%(message)s')
_json_file = os.path.join(self.output_dir, "config.json")
with open(_json_file, "w") as json_file:
json.dump(self.inputs, json_file)
def set_scheduler_and_optimizer(self, model, t_total):
# Linear warmup and decay
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=self.learning_rate, eps=self.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.warmup_steps,
num_training_steps=t_total)
return optimizer, scheduler
@staticmethod
def set_seed(seed_num):
seed(seed_num)
np.random.seed(seed_num)
torch.manual_seed(seed_num)
def train_model_on_corpus(self, train_corpus, test_corpus):
"""
        Trains a named entity recognition model.
        :param train_corpus: DataFrame of the training text corpus
        :param test_corpus: DataFrame of the test text corpus
"""
self.parse_input_json()
# Loading labels
labels, labels_weight = self.load_labels(train_corpus)
# Loading model and tokenizer
model, tokenizer = self.load_model_and_tokenizer(labels)
# Loading training and eval datasets
train_dataset = self.load_and_cache_texts(train_corpus, tokenizer, labels)
test_dataset = self.load_and_cache_texts(test_corpus, tokenizer, labels)
# Save config and logs
self.save_config_and_logs()
# Train model
self.run_training(train_dataset, test_dataset, model, tokenizer, labels, labels_weight)
# Show examples
_, processed_corpus = \
self.run_predict_and_eval(test_dataset, model, tokenizer, labels, None, no_evaluation=True)
show_legend(self.list_entities)
show_annotations(processed_corpus, self.list_entities, self.white_space_token)
class Pseudo:
def __init__(self, _names_path, _address_path, _car_path, societies_path, labels_column, labels_format):
self.names_path = _names_path
self.address_path = _address_path
self.car_path = _car_path
self.societies_path = societies_path
self.labels_col = labels_column
self.labels_format = labels_format
self.fake = Faker('fr_FR')
Faker.seed()
self.address, self.names, self.zip, self.cars, self.societies, self.train_df, self.dev_df, self.test_df = \
[None] * 8
def chain_of_replacements_other_entity(self, corpus, list_entities):
"""
        Replaces every entity of the given list with another fake entity of the same type in the corpus.
        :param corpus: DataFrame of the text corpus
        :param list_entities: list of the entities to replace
        :return: corpus with the entities replaced
"""
self.address = pd.read_csv(self.address_path)
self.names = pd.read_csv(self.names_path)
self.zip = self.address['postcode'].unique().tolist()
self.cars = pd.read_csv(self.car_path)
        self.societies = pd.read_csv(self.societies_path)
"""
Linear mixed effects models are regression models for dependent data.
They can be used to estimate regression relationships involving both
means and variances.
These models are also known as multilevel linear models, and
hierarchical linear models.
The MixedLM class fits linear mixed effects models to data, and
provides support for some common post-estimation tasks. This is a
group-based implementation that is most efficient for models in which
the data can be partitioned into independent groups. Some models with
crossed effects can be handled by specifying a model with a single
group.
The data are partitioned into disjoint groups. The probability model
for group i is:
Y = X*beta + Z*gamma + epsilon
where
* n_i is the number of observations in group i
* Y is a n_i dimensional response vector (called endog in MixedLM)
* X is a n_i x k_fe dimensional design matrix for the fixed effects
(called exog in MixedLM)
* beta is a k_fe-dimensional vector of fixed effects parameters
(called fe_params in MixedLM)
* Z is a design matrix for the random effects with n_i rows (called
exog_re in MixedLM). The number of columns in Z can vary by group
as discussed below.
* gamma is a random vector with mean 0. The covariance matrix for the
first `k_re` elements of `gamma` (called cov_re in MixedLM) is
common to all groups. The remaining elements of `gamma` are
variance components as discussed in more detail below. Each group
receives its own independent realization of gamma.
* epsilon is a n_i dimensional vector of iid normal
errors with mean 0 and variance sigma^2; the epsilon
values are independent both within and between groups
Y, X and Z must be entirely observed. beta, Psi, and sigma^2 are
estimated using ML or REML estimation, and gamma and epsilon are
random so define the probability model.
The marginal mean structure is E[Y | X, Z] = X*beta. If only the mean
structure is of interest, GEE is an alternative to using linear mixed
models.
Two types of random effects are supported. Standard random effects
are correlated with each other in arbitrary ways. Every group has the
same number (`k_re`) of standard random effects, with the same joint
distribution (but with independent realizations across the groups).
Variance components are uncorrelated with each other, and with the
standard random effects. Each variance component has mean zero, and
all realizations of a given variance component have the same variance
parameter. The number of realized variance components per variance
parameter can differ across the groups.
The primary reference for the implementation details is:
<NAME>, <NAME> (1988). "Newton Raphson and EM algorithms for
linear mixed effects models for repeated measures data". Journal of
the American Statistical Association. Volume 83, Issue 404, pages
1014-1022.
See also this more recent document:
http://econ.ucsb.edu/~doug/245a/Papers/Mixed%20Effects%20Implement.pdf
All the likelihood, gradient, and Hessian calculations closely follow
Lindstrom and Bates 1988, adapted to support variance components.
The following two documents are written more from the perspective of
users:
http://lme4.r-forge.r-project.org/lMMwR/lrgprt.pdf
http://lme4.r-forge.r-project.org/slides/2009-07-07-Rennes/3Longitudinal-4.pdf
Notation:
* `cov_re` is the random effects covariance matrix (referred to above
as Psi) and `scale` is the (scalar) error variance. For a single
group, the marginal covariance matrix of endog given exog is scale*I
+ Z * cov_re * Z', where Z is the design matrix for the random
effects in one group.
* `vcomp` is a vector of variance parameters. The length of `vcomp`
is determined by the number of keys in either the `exog_vc` argument
to ``MixedLM``, or the `vc_formula` argument when using formulas to
fit a model.
Notes:
1. Three different parameterizations are used in different places.
The regression slopes (usually called `fe_params`) are identical in
all three parameterizations, but the variance parameters differ. The
parameterizations are:
* The "user parameterization" in which cov(endog) = scale*I + Z *
cov_re * Z', as described above. This is the main parameterization
visible to the user.
* The "profile parameterization" in which cov(endog) = I +
Z * cov_re1 * Z'. This is the parameterization of the profile
likelihood that is maximized to produce parameter estimates.
(see Lindstrom and Bates for details). The "user" cov_re is
equal to the "profile" cov_re1 times the scale.
* The "square root parameterization" in which we work with the Cholesky
factor of cov_re1 instead of cov_re directly. This is hidden from the
user.
All three parameterizations can be packed into a vector by
(optionally) concatenating `fe_params` together with the lower
triangle or Cholesky square root of the dependence structure, followed
by the variance parameters for the variance components. The are
stored as square roots if (and only if) the random effects covariance
matrix is stored as its Cholesky factor. Note that when unpacking, it
is important to either square or reflect the dependence structure
depending on which parameterization is being used.
Two score methods are implemented. One takes the score with respect
to the elements of the random effects covariance matrix (used for
inference once the MLE is reached), and the other takes the score with
respect to the parameters of the Cholesky square root of the random
effects covariance matrix (used for optimization).
The numerical optimization uses GLS to avoid explicitly optimizing
over the fixed effects parameters. The likelihood that is optimized
is profiled over both the scale parameter (a scalar) and the fixed
effects parameters (if any). As a result of this profiling, it is
difficult and unnecessary to calculate the Hessian of the profiled log
likelihood function, so that calculation is not implemented here.
Therefore, optimization methods requiring the Hessian matrix such as
the Newton-Raphson algorithm cannot be used for model fitting.
"""
import warnings
import numpy as np
import pandas as pd
import patsy
from scipy import sparse
from scipy.stats.distributions import norm
from statsmodels.base._penalties import Penalty
import statsmodels.base.model as base
from statsmodels.tools import data as data_tools
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
_warn_cov_sing = "The random effects covariance matrix is singular."
def _dot(x, y):
"""
Returns the dot product of the arrays, works for sparse and dense.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
return np.dot(x, y)
elif sparse.issparse(x):
return x.dot(y)
elif sparse.issparse(y):
return y.T.dot(x.T).T
# From numpy, adapted to work with sparse and dense arrays.
def _multi_dot_three(A, B, C):
"""
Find best ordering for three arrays and do the multiplication.
    Doing it manually instead of using dynamic programming is
approximately 15 times faster.
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return _dot(_dot(A, B), C)
else:
return _dot(A, _dot(B, C))
def _dotsum(x, y):
"""
Returns sum(x * y), where '*' is the pointwise product, computed
efficiently for dense and sparse matrices.
"""
if sparse.issparse(x):
return x.multiply(y).sum()
else:
# This way usually avoids allocating a temporary.
return np.dot(x.ravel(), y.ravel())
class VCSpec:
"""
Define the variance component structure of a multilevel model.
An instance of the class contains three attributes:
- names : names[k] is the name of variance component k.
- mats : mats[k][i] is the design matrix for group index
i in variance component k.
- colnames : colnames[k][i] is the list of column names for
mats[k][i].
The groups in colnames and mats must be in sorted order.
"""
def __init__(self, names, colnames, mats):
self.names = names
self.colnames = colnames
self.mats = mats
def _get_exog_re_names(self, exog_re):
"""
Passes through if given a list of names. Otherwise, gets pandas names
or creates some generic variable names as needed.
"""
if self.k_re == 0:
return []
if isinstance(exog_re, pd.DataFrame):
return exog_re.columns.tolist()
elif isinstance(exog_re, pd.Series) and exog_re.name is not None:
return [exog_re.name]
elif isinstance(exog_re, list):
return exog_re
# Default names
defnames = ["x_re{0:1d}".format(k + 1) for k in range(exog_re.shape[1])]
return defnames
class MixedLMParams:
"""
This class represents a parameter state for a mixed linear model.
Parameters
----------
k_fe : int
The number of covariates with fixed effects.
k_re : int
The number of covariates with random coefficients (excluding
variance components).
k_vc : int
The number of variance components parameters.
Notes
-----
This object represents the parameter state for the model in which
the scale parameter has been profiled out.
"""
def __init__(self, k_fe, k_re, k_vc):
self.k_fe = k_fe
self.k_re = k_re
self.k_re2 = k_re * (k_re + 1) // 2
self.k_vc = k_vc
self.k_tot = self.k_fe + self.k_re2 + self.k_vc
self._ix = np.tril_indices(self.k_re)
def from_packed(params, k_fe, k_re, use_sqrt, has_fe):
"""
Create a MixedLMParams object from packed parameter vector.
Parameters
----------
params : array_like
The mode parameters packed into a single vector.
k_fe : int
The number of covariates with fixed effects
k_re : int
The number of covariates with random effects (excluding
variance components).
use_sqrt : bool
If True, the random effects covariance matrix is provided
as its Cholesky factor, otherwise the lower triangle of
the covariance matrix is stored.
has_fe : bool
If True, `params` contains fixed effects parameters.
Otherwise, the fixed effects parameters are set to zero.
Returns
-------
A MixedLMParams object.
"""
k_re2 = int(k_re * (k_re + 1) / 2)
# The number of covariance parameters.
if has_fe:
k_vc = len(params) - k_fe - k_re2
else:
k_vc = len(params) - k_re2
pa = MixedLMParams(k_fe, k_re, k_vc)
cov_re = np.zeros((k_re, k_re))
ix = pa._ix
if has_fe:
pa.fe_params = params[0:k_fe]
cov_re[ix] = params[k_fe:k_fe+k_re2]
else:
pa.fe_params = np.zeros(k_fe)
cov_re[ix] = params[0:k_re2]
if use_sqrt:
cov_re = np.dot(cov_re, cov_re.T)
else:
cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))
pa.cov_re = cov_re
if k_vc > 0:
if use_sqrt:
pa.vcomp = params[-k_vc:]**2
else:
pa.vcomp = params[-k_vc:]
else:
pa.vcomp = np.array([])
return pa
from_packed = staticmethod(from_packed)
def from_components(fe_params=None, cov_re=None, cov_re_sqrt=None,
vcomp=None):
"""
Create a MixedLMParams object from each parameter component.
Parameters
----------
fe_params : array_like
The fixed effects parameter (a 1-dimensional array). If
None, there are no fixed effects.
cov_re : array_like
The random effects covariance matrix (a square, symmetric
2-dimensional array).
cov_re_sqrt : array_like
The Cholesky (lower triangular) square root of the random
effects covariance matrix.
vcomp : array_like
The variance component parameters. If None, there are no
variance components.
Returns
-------
A MixedLMParams object.
"""
if vcomp is None:
vcomp = np.empty(0)
if fe_params is None:
fe_params = np.empty(0)
if cov_re is None and cov_re_sqrt is None:
cov_re = np.empty((0, 0))
k_fe = len(fe_params)
k_vc = len(vcomp)
k_re = cov_re.shape[0] if cov_re is not None else cov_re_sqrt.shape[0]
pa = MixedLMParams(k_fe, k_re, k_vc)
pa.fe_params = fe_params
if cov_re_sqrt is not None:
pa.cov_re = np.dot(cov_re_sqrt, cov_re_sqrt.T)
elif cov_re is not None:
pa.cov_re = cov_re
pa.vcomp = vcomp
return pa
from_components = staticmethod(from_components)
def copy(self):
"""
Returns a copy of the object.
"""
obj = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
obj.fe_params = self.fe_params.copy()
obj.cov_re = self.cov_re.copy()
obj.vcomp = self.vcomp.copy()
return obj
def get_packed(self, use_sqrt, has_fe=False):
"""
Return the model parameters packed into a single vector.
Parameters
----------
use_sqrt : bool
If True, the Cholesky square root of `cov_re` is
included in the packed result. Otherwise the
lower triangle of `cov_re` is included.
has_fe : bool
If True, the fixed effects parameters are included
in the packed result, otherwise they are omitted.
"""
if self.k_re > 0:
if use_sqrt:
try:
L = np.linalg.cholesky(self.cov_re)
except np.linalg.LinAlgError:
L = np.diag(np.sqrt(np.diag(self.cov_re)))
cpa = L[self._ix]
else:
cpa = self.cov_re[self._ix]
else:
cpa = np.zeros(0)
if use_sqrt:
vcomp = np.sqrt(self.vcomp)
else:
vcomp = self.vcomp
if has_fe:
pa = np.concatenate((self.fe_params, cpa, vcomp))
else:
pa = np.concatenate((cpa, vcomp))
return pa
def _smw_solver(s, A, AtA, Qi, di):
r"""
Returns a solver for the linear system:
.. math::
(sI + ABA^\prime) y = x
The returned function f satisfies f(x) = y as defined above.
B and its inverse matrix are block diagonal. The upper left block
of :math:`B^{-1}` is Qi and its lower right block is diag(di).
Parameters
----------
s : scalar
See above for usage
A : ndarray
p x q matrix, in general q << p, may be sparse.
AtA : square ndarray
:math:`A^\prime A`, a q x q matrix.
Qi : square symmetric ndarray
The matrix `B` is q x q, where q = r + d. `B` consists of an
r x r diagonal block whose inverse is `Qi`, and a d x d diagonal
block, whose inverse is diag(di).
di : 1d array_like
See documentation for Qi.
Returns
-------
A function for solving a linear system, as documented above.
Notes
-----
Uses Sherman-Morrison-Woodbury identity:
https://en.wikipedia.org/wiki/Woodbury_matrix_identity
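Examples
--------
A small numeric check, illustrative only; the arrays are arbitrary.
With B = I, `Qi` is also the identity and there is no diag(di)
block, so the returned function inverts s*I + A*A':
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> A = rng.normal(size=(5, 2))
>>> f = _smw_solver(1., A, np.dot(A.T, A), np.eye(2), np.empty(0))
>>> x = rng.normal(size=5)
>>> v = np.eye(5) + np.dot(A, A.T)
>>> ok = np.allclose(np.dot(v, f(x)), x)  # True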
"""
# Use SMW identity
qmat = AtA / s
m = Qi.shape[0]
qmat[0:m, 0:m] += Qi
if sparse.issparse(A):
qmat[m:, m:] += sparse.diags(di)
def solver(rhs):
ql = A.T.dot(rhs)
# Based on profiling, the next line can be the
# majority of the entire run time of fitting the model.
ql = sparse.linalg.spsolve(qmat, ql)
if ql.ndim < rhs.ndim:
# spsolve squeezes nx1 rhs
ql = ql[:, None]
ql = A.dot(ql)
return rhs / s - ql / s**2
else:
d = qmat.shape[0]
qmat.flat[m*(d+1)::d+1] += di
qmati = np.linalg.solve(qmat, A.T)
def solver(rhs):
# A is tall and qmati is wide, so we want
# A * (qmati * rhs) not (A * qmati) * rhs
ql = np.dot(qmati, rhs)
ql = np.dot(A, ql)
return rhs / s - ql / s**2
return solver
def _smw_logdet(s, A, AtA, Qi, di, B_logdet):
r"""
Returns the log determinant of
.. math::
sI + ABA^\prime
Uses the matrix determinant lemma to accelerate the calculation.
B is assumed to be positive definite, and s > 0, therefore the
determinant is positive.
Parameters
----------
s : positive scalar
See above for usage
A : ndarray
p x q matrix, in general q << p.
AtA : square ndarray
:math:`A^\prime A`, a q x q matrix.
Qi : square symmetric ndarray
The matrix `B` is q x q, where q = r + d. `B` consists of an
r x r diagonal block whose inverse is `Qi`, and a d x d diagonal
block, whose inverse is diag(di).
di : 1d array_like
See documentation for Qi.
B_logdet : real
The log determinant of B
Returns
-------
The log determinant of s*I + A*B*A'.
Notes
-----
Uses the matrix determinant lemma:
https://en.wikipedia.org/wiki/Matrix_determinant_lemma
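Examples
--------
A small numeric check, illustrative only, using the same arbitrary
setup as in `_smw_solver`; with B = I we have B_logdet = 0 and the
result should agree with a direct computation:
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> A = rng.normal(size=(5, 2))
>>> AtA = np.dot(A.T, A)
>>> ld = _smw_logdet(1., A, AtA, np.eye(2), np.empty(0), 0.)
>>> ld0 = np.linalg.slogdet(np.eye(5) + np.dot(A, A.T))[1]
>>> ok = np.allclose(ld, ld0)  # True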
"""
p = A.shape[0]
ld = p * np.log(s)
qmat = AtA / s
m = Qi.shape[0]
qmat[0:m, 0:m] += Qi
if sparse.issparse(qmat):
qmat[m:, m:] += sparse.diags(di)
# There are faster but much more difficult ways to do this
# https://stackoverflow.com/questions/19107617
lu = sparse.linalg.splu(qmat)
dl = lu.L.diagonal().astype(np.complex128)
du = lu.U.diagonal().astype(np.complex128)
ld1 = np.log(dl).sum() + np.log(du).sum()
ld1 = ld1.real
else:
d = qmat.shape[0]
qmat.flat[m*(d+1)::d+1] += di
_, ld1 = np.linalg.slogdet(qmat)
return B_logdet + ld + ld1
def _convert_vc(exog_vc):
vc_names = []
vc_colnames = []
vc_mats = []
# Get the groups in sorted order
groups = set()
for k, v in exog_vc.items():
groups |= set(v.keys())
groups = list(groups)
groups.sort()
for k, v in exog_vc.items():
vc_names.append(k)
colnames, mats = [], []
for g in groups:
try:
colnames.append(v[g].columns)
except AttributeError:
colnames.append([str(j) for j in range(v[g].shape[1])])
mats.append(v[g])
vc_colnames.append(colnames)
vc_mats.append(mats)
ii = np.argsort(vc_names)
vc_names = [vc_names[i] for i in ii]
vc_colnames = [vc_colnames[i] for i in ii]
vc_mats = [vc_mats[i] for i in ii]
return VCSpec(vc_names, vc_colnames, vc_mats)
class MixedLM(base.LikelihoodModel):
"""
Linear Mixed Effects Model
Parameters
----------
endog : 1d array_like
The dependent variable
exog : 2d array_like
A matrix of covariates used to determine the
mean structure (the "fixed effects" covariates).
groups : 1d array_like
A vector of labels determining the groups -- data from
different groups are independent
exog_re : 2d array_like
A matrix of covariates used to determine the variance and
covariance structure (the "random effects" covariates). If
None, defaults to a random intercept for each group.
exog_vc : VCSpec instance or dict-like (deprecated)
A VCSpec instance defines the structure of the variance
components in the model. Alternatively, see notes below
for a dictionary-based format. The dictionary format is
deprecated and may be removed at some point in the future.
use_sqrt : bool
If True, optimization is carried out using the lower
triangle of the square root of the random effects
covariance matrix, otherwise it is carried out using the
lower triangle of the random effects covariance matrix.
missing : str
The approach to missing data handling
Notes
-----
If `exog_vc` is not a `VCSpec` instance, then it must be a
dictionary of dictionaries. Specifically, `exog_vc[a][g]` is a
matrix whose columns are linearly combined using independent
random coefficients. This random term then contributes to the
variance structure of the data for group `g`. The random
coefficients all have mean zero, and have the same variance. The
matrix must be `m x k`, where `m` is the number of observations in
group `g`. The number of columns may differ among the top-level
groups.
The covariates in `exog`, `exog_re` and `exog_vc` may (but need
not) partially or wholly overlap.
`use_sqrt` should almost always be set to True. The main use case
for use_sqrt=False is when complicated patterns of fixed values in
the covariance structure are set (using the `free` argument to
`fit`) that cannot be expressed in terms of the Cholesky factor L.
Examples
--------
A basic mixed model with fixed effects for the columns of
``exog`` and a random intercept for each distinct value of
``group``:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
correlated random coefficients for the columns of ``exog_re``:
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
independent random coefficients for the columns of ``exog_re``:
>>> free = MixedLMParams.from_components(
fe_params=np.ones(exog.shape[1]),
cov_re=np.eye(exog_re.shape[1]))
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit(free=free)
A different way to specify independent random coefficients for the
columns of ``exog_re``. In this example ``groups`` must be a
Pandas Series with compatible indexing with ``exog_re``, and
``exog_re`` has two columns.
>>> g = groups.groupby(groups).groups
>>> vc = {}
>>> vc['1'] = {k: exog_re.loc[g[k], [0]] for k in g}
>>> vc['2'] = {k: exog_re.loc[g[k], [1]] for k in g}
>>> model = sm.MixedLM(endog, exog, groups, exog_vc=vc)
>>> result = model.fit()
"""
def __init__(self, endog, exog, groups, exog_re=None,
exog_vc=None, use_sqrt=True, missing='none',
**kwargs):
_allowed_kwargs = ["missing_idx", "design_info", "formula"]
for x in kwargs.keys():
if x not in _allowed_kwargs:
raise ValueError(
"argument %s not permitted for MixedLM initialization" % x)
self.use_sqrt = use_sqrt
# Some defaults
self.reml = True
self.fe_pen = None
self.re_pen = None
if isinstance(exog_vc, dict):
warnings.warn("Using deprecated variance components format")
# Convert from old to new representation
exog_vc = _convert_vc(exog_vc)
if exog_vc is not None:
self.k_vc = len(exog_vc.names)
self.exog_vc = exog_vc
else:
self.k_vc = 0
self.exog_vc = VCSpec([], [], [])
# If there is one covariate, it may be passed in as a column
# vector, convert these to 2d arrays.
# TODO: Can this be moved up in the class hierarchy?
# yes, it should be done up the hierarchy
if (exog is not None and
data_tools._is_using_ndarray_type(exog, None) and
exog.ndim == 1):
exog = exog[:, None]
if (exog_re is not None and
data_tools._is_using_ndarray_type(exog_re, None) and
exog_re.ndim == 1):
exog_re = exog_re[:, None]
# Calling super creates self.endog, etc. as ndarrays and the
# original exog, endog, etc. are self.data.endog, etc.
super(MixedLM, self).__init__(endog, exog, groups=groups,
exog_re=exog_re, missing=missing,
**kwargs)
self._init_keys.extend(["use_sqrt", "exog_vc"])
# Number of fixed effects parameters
self.k_fe = exog.shape[1]
if exog_re is None and len(self.exog_vc.names) == 0:
# Default random effects structure (random intercepts).
self.k_re = 1
self.k_re2 = 1
self.exog_re = np.ones((len(endog), 1), dtype=np.float64)
self.data.exog_re = self.exog_re
names = ['Group Var']
self.data.param_names = self.exog_names + names
self.data.exog_re_names = names
self.data.exog_re_names_full = names
elif exog_re is not None:
# Process exog_re the same way that exog is handled
# upstream
# TODO: this is wrong and should be handled upstream wholly
self.data.exog_re = exog_re
self.exog_re = np.asarray(exog_re)
if self.exog_re.ndim == 1:
self.exog_re = self.exog_re[:, None]
# Model dimensions
# Number of random effect covariates
self.k_re = self.exog_re.shape[1]
# Number of covariance parameters
self.k_re2 = self.k_re * (self.k_re + 1) // 2
else:
# All random effects are variance components
self.k_re = 0
self.k_re2 = 0
if not self.data._param_names:
# HACK: could have been set in from_formula already
# needs refactor
(param_names, exog_re_names,
exog_re_names_full) = self._make_param_names(exog_re)
self.data.param_names = param_names
self.data.exog_re_names = exog_re_names
self.data.exog_re_names_full = exog_re_names_full
self.k_params = self.k_fe + self.k_re2
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
group_labels = list(set(groups))
group_labels.sort()
row_indices = dict((s, []) for s in group_labels)
for i, g in enumerate(groups):
row_indices[g].append(i)
self.row_indices = row_indices
self.group_labels = group_labels
self.n_groups = len(self.group_labels)
# Split the data by groups
self.endog_li = self.group_list(self.endog)
self.exog_li = self.group_list(self.exog)
self.exog_re_li = self.group_list(self.exog_re)
# Precompute this.
if self.exog_re is None:
self.exog_re2_li = None
else:
self.exog_re2_li = [np.dot(x.T, x) for x in self.exog_re_li]
# The total number of observations, summed over all groups
self.nobs = len(self.endog)
self.n_totobs = self.nobs
# Set the fixed effects parameter names
if self.exog_names is None:
self.exog_names = ["FE%d" % (k + 1) for k in
range(self.exog.shape[1])]
# Precompute this
self._aex_r = []
self._aex_r2 = []
for i in range(self.n_groups):
a = self._augment_exog(i)
self._aex_r.append(a)
ma = _dot(a.T, a)
self._aex_r2.append(ma)
# Precompute this
self._lin, self._quad = self._reparam()
def _make_param_names(self, exog_re):
"""
Returns the full parameter names list, just the exogenous random
effects variables, and the exogenous random effects variables with
the interaction terms.
"""
exog_names = list(self.exog_names)
exog_re_names = _get_exog_re_names(self, exog_re)
param_names = []
jj = self.k_fe
for i in range(len(exog_re_names)):
for j in range(i + 1):
if i == j:
param_names.append(exog_re_names[i] + " Var")
else:
param_names.append(exog_re_names[j] + " x " +
exog_re_names[i] + " Cov")
jj += 1
vc_names = [x + " Var" for x in self.exog_vc.names]
return exog_names + param_names + vc_names, exog_re_names, param_names
@classmethod
def from_formula(cls, formula, data, re_formula=None, vc_formula=None,
subset=None, use_sparse=False, missing='none', *args,
**kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array_like
The data for the model. See Notes.
re_formula : str
A one-sided formula defining the variance structure of the
model. The default gives a random intercept for each
group.
vc_formula : dict-like
Formulas describing variance components. `vc_formula[vc]` is
the formula for the component with variance parameter named
`vc`. The formula is processed into a matrix, and the columns
of this matrix are linearly combined with independent random
coefficients having mean zero and a common variance.
subset : array_like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
missing : str
Either 'none' or 'drop'
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms; e.g., it can be a numpy structured or rec array, a
dictionary, or a pandas DataFrame. `args` and `kwargs` are
passed on to the model instantiation.
If the variance component is intended to produce random
intercepts for disjoint subsets of a group, specified by
string labels or a categorical data value, always use '0 +' in
the formula so that no overall intercept is included.
If the variance components specify random slopes and you do
not also want a random group-level intercept in the model,
then use '0 +' in the formula to exclude the intercept.
The variance components formulas are processed separately for
each group. If a variable is categorical the results will not
be affected by whether the group labels are distinct or
re-used over the top-level groups.
Examples
--------
Suppose we have data from an educational study with students
nested in classrooms nested in schools. The students take a
test, and we want to relate the test scores to the students'
ages, while accounting for the effects of classrooms and
schools. The school will be the top-level group, and the
classroom is a nested group that is specified as a variance
component. Note that the schools may have different numbers of
classrooms, and the classroom labels may (but need not) be
different across the schools.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age', vc_formula=vc, \
re_formula='1', groups='school', data=data)
Now suppose we also have a previous test score called
'pretest'. If we want the relationship between pretest
scores and the current test to vary by classroom, we can
specify a random slope for the pretest score
>>> vc = {'classroom': '0 + C(classroom)', 'pretest': '0 + pretest'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc, \
re_formula='1', groups='school', data=data)
The following model is almost equivalent to the previous one,
but here the classroom random intercept and pretest slope may
be correlated.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc, \
re_formula='1 + pretest', groups='school', \
data=data)
"""
if "groups" not in kwargs.keys():
raise AttributeError("'groups' is a required keyword argument " +
"in MixedLM.from_formula")
groups = kwargs["groups"]
# If `groups` is a variable name, retrieve the data for the
# groups variable.
group_name = "Group"
if isinstance(groups, str):
group_name = groups
groups = np.asarray(data[groups])
else:
groups = np.asarray(groups)
del kwargs["groups"]
# Bypass all upstream missing data handling to properly handle
# variance components
if missing == 'drop':
data, groups = _handle_missing(data, groups, formula, re_formula,
vc_formula)
missing = 'none'
if re_formula is not None:
if re_formula.strip() == "1":
# Work around Patsy bug, fixed by 0.3.
exog_re = np.ones((data.shape[0], 1))
exog_re_names = [group_name]
else:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_re = patsy.dmatrix(re_formula, data, eval_env=eval_env)
exog_re_names = exog_re.design_info.column_names
exog_re_names = [x.replace("Intercept", group_name)
for x in exog_re_names]
exog_re = np.asarray(exog_re)
if exog_re.ndim == 1:
exog_re = exog_re[:, None]
else:
exog_re = None
if vc_formula is None:
exog_re_names = [group_name]
else:
exog_re_names = []
if vc_formula is not None:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
vc_mats = []
vc_colnames = []
vc_names = []
gb = data.groupby(groups)
kylist = sorted(gb.groups.keys())
vcf = sorted(vc_formula.keys())
for vc_name in vcf:
md = patsy.ModelDesc.from_formula(vc_formula[vc_name])
vc_names.append(vc_name)
evc_mats, evc_colnames = [], []
for group_ix, group in enumerate(kylist):
ii = gb.groups[group]
mat = patsy.dmatrix(
md,
data.loc[ii, :],
eval_env=eval_env,
return_type='dataframe')
evc_colnames.append(mat.columns.tolist())
if use_sparse:
evc_mats.append(sparse.csr_matrix(mat))
else:
evc_mats.append(np.asarray(mat))
vc_mats.append(evc_mats)
vc_colnames.append(evc_colnames)
exog_vc = VCSpec(vc_names, vc_colnames, vc_mats)
else:
exog_vc = VCSpec([], [], [])
kwargs["subset"] = None
kwargs["exog_re"] = exog_re
kwargs["exog_vc"] = exog_vc
kwargs["groups"] = groups
mod = super(MixedLM, cls).from_formula(
formula, data, *args, **kwargs)
# expand re names to account for pairs of RE
(param_names,
exog_re_names,
exog_re_names_full) = mod._make_param_names(exog_re_names)
mod.data.param_names = param_names
mod.data.exog_re_names = exog_re_names
mod.data.exog_re_names_full = exog_re_names_full
if vc_formula is not None:
mod.data.vcomp_names = mod.exog_vc.names
return mod
def predict(self, params, exog=None):
"""
Return predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a mixed linear model. Can be either a
MixedLMParams instance, or a vector containing the packed
model parameters in which the fixed effects parameters are
at the beginning of the vector, or a vector containing
only the fixed effects parameters.
exog : array_like, optional
Design / exogenous data for the fixed effects. Model exog
is used if None.
Returns
-------
An array of fitted values. Note that these predicted values
only reflect the fixed effects mean structure of the model.
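Examples
--------
A hypothetical usage sketch, with ``endog``, ``exog`` and ``groups``
defined as in the class-level examples:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit()
>>> yhat = model.predict(result.fe_params)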
"""
if exog is None:
exog = self.exog
if isinstance(params, MixedLMParams):
params = params.fe_params
else:
params = params[0:self.k_fe]
return np.dot(exog, params)
def group_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
grouping structure.
"""
if array is None:
return None
if array.ndim == 1:
return [np.array(array[self.row_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.row_indices[k], :])
for k in self.group_labels]
def fit_regularized(self, start_params=None, method='l1', alpha=0,
ceps=1e-4, ptol=1e-6, maxit=200, **fit_kwargs):
"""
Fit a model in which the fixed effects parameters are
penalized. The dependence parameters are held fixed at their
estimated values in the unpenalized model.
Parameters
----------
method : str or Penalty object
Method for regularization. If a string, must be 'l1'.
alpha : array_like
Scalar or vector of penalty weights. If a scalar, the
same weight is applied to all coefficients; if a vector,
it contains a weight for each coefficient. If method is a
Penalty object, the weights are scaled by alpha. For L1
regularization, the weights are used directly.
ceps : positive real scalar
Fixed effects parameters smaller than this value
in magnitude are treated as being zero.
ptol : positive real scalar
Convergence occurs when the sup norm difference
between successive values of `fe_params` is less than
`ptol`.
maxit : int
The maximum number of iterations.
**fit_kwargs
Additional keyword arguments passed to fit.
Returns
-------
A MixedLMResults instance containing the results.
Notes
-----
The covariance structure is not updated as the fixed effects
parameters are varied.
The algorithm used here for L1 regularization is a "shooting"
or cyclic coordinate descent algorithm.
If method is 'l1', then `fe_pen` and `cov_pen` are used to
obtain the covariance structure, but are ignored during the
L1-penalized fitting.
References
----------
Friedman, J., Hastie, T. and Tibshirani, R. Regularized
Paths for Generalized Linear Models via Coordinate
Descent. Journal of Statistical Software, 33(1) (2008)
http://www.jstatsoft.org/v33/i01/paper
http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
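Examples
--------
A hypothetical usage sketch (``endog``, ``exog`` and ``groups`` as in
the class-level examples); the penalty weight below is arbitrary:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit_regularized(method='l1', alpha=1.0)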
"""
if isinstance(method, str) and (method.lower() != 'l1'):
raise ValueError("Invalid regularization method")
# If method is a smooth penalty just optimize directly.
if isinstance(method, Penalty):
# Scale the penalty weights by alpha
method.alpha = alpha
fit_kwargs.update({"fe_pen": method})
return self.fit(**fit_kwargs)
if np.isscalar(alpha):
alpha = alpha * np.ones(self.k_fe, dtype=np.float64)
# Fit the unpenalized model to get the dependence structure.
mdf = self.fit(**fit_kwargs)
fe_params = mdf.fe_params
cov_re = mdf.cov_re
vcomp = mdf.vcomp
scale = mdf.scale
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
for itr in range(maxit):
fe_params_s = fe_params.copy()
for j in range(self.k_fe):
if abs(fe_params[j]) < ceps:
continue
# The residuals
fe_params[j] = 0.
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
# The loss function has the form
# a*x^2 + b*x + pwt*|x|
a, b = 0., 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
resid = resid_all[self.row_indices[group]]
solver = _smw_solver(scale, ex_r, ex2_r, cov_re_inv,
1 / vc_var)
x = exog[:, j]
u = solver(x)
a += np.dot(u, x)
b -= 2 * np.dot(u, resid)
pwt1 = alpha[j]
if b > pwt1:
fe_params[j] = -(b - pwt1) / (2 * a)
elif b < -pwt1:
fe_params[j] = -(b + pwt1) / (2 * a)
if np.abs(fe_params_s - fe_params).max() < ptol:
break
# Replace the fixed effects estimates with their penalized
# values, leave the dependence parameters in their unpenalized
# state.
params_prof = mdf.params.copy()
params_prof[0:self.k_fe] = fe_params
scale = self.get_scale(fe_params, mdf.cov_re_unscaled, mdf.vcomp)
# Get the Hessian including only the nonzero fixed effects,
# then blow back up to the full size after inverting.
hess, sing = self.hessian(params_prof)
if sing:
warnings.warn(_warn_cov_sing)
pcov = np.nan * np.ones_like(hess)
ii = np.abs(params_prof) > ceps
ii[self.k_fe:] = True
ii = np.flatnonzero(ii)
hess1 = hess[ii, :][:, ii]
pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)
params_object = MixedLMParams.from_components(fe_params, cov_re=cov_re)
results = MixedLMResults(self, params_prof, pcov / scale)
results.params_object = params_object
results.fe_params = fe_params
results.cov_re = cov_re
results.scale = scale
results.cov_re_unscaled = mdf.cov_re_unscaled
results.method = mdf.method
results.converged = True
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
return MixedLMResultsWrapper(results)
def get_fe_params(self, cov_re, vcomp, tol=1e-10):
"""
Use GLS to update the fixed effects parameter estimates.
Parameters
----------
cov_re : array_like (2d)
The covariance matrix of the random effects.
vcomp : array_like (1d)
The variance components.
tol : float
A tolerance parameter to determine when covariances
are singular.
Returns
-------
params : ndarray
The GLS estimates of the fixed effects parameters.
singular : bool
True if the covariance is singular
"""
if self.k_fe == 0:
return np.array([]), False
sing = False
if self.k_re == 0:
cov_re_inv = np.empty((0, 0))
else:
w, v = np.linalg.eigh(cov_re)
if w.min() < tol:
# Singular, use pseudo-inverse
sing = True
ii = np.flatnonzero(w >= tol)
if len(ii) == 0:
cov_re_inv = np.zeros_like(cov_re)
else:
vi = v[:, ii]
wi = w[ii]
cov_re_inv = np.dot(vi / wi, vi.T)
else:
cov_re_inv = np.linalg.inv(cov_re)
# Cache these quantities that do not change.
if not hasattr(self, "_endex_li"):
self._endex_li = []
for group_ix, _ in enumerate(self.group_labels):
mat = np.concatenate(
(self.exog_li[group_ix],
self.endog_li[group_ix][:, None]), axis=1)
self._endex_li.append(mat)
xtxy = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
if vc_var.size > 0:
if vc_var.min() < tol:
# Pseudo-inverse
sing = True
ii = np.flatnonzero(vc_var >= tol)
vc_vari = np.zeros_like(vc_var)
vc_vari[ii] = 1 / vc_var[ii]
else:
vc_vari = 1 / vc_var
else:
vc_vari = np.empty(0)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, vc_vari)
u = solver(self._endex_li[group_ix])
xtxy += np.dot(exog.T, u)
if sing:
fe_params = np.dot(np.linalg.pinv(xtxy[:, 0:-1]), xtxy[:, -1])
else:
fe_params = np.linalg.solve(xtxy[:, 0:-1], xtxy[:, -1])
return fe_params, sing
def _reparam(self):
"""
Returns parameters of the map converting parameters from the
form used in optimization to the form returned to the user.
Returns
-------
lin : list-like
Linear terms of the map
quad : list-like
Quadratic terms of the map
Notes
-----
If P are the standard form parameters and R are the
transformed parameters (i.e. with the Cholesky square root
covariance and square root transformed variance components),
then P[i] = lin[i] * R + R' * quad[i] * R
"""
k_fe, k_re, k_re2, k_vc = self.k_fe, self.k_re, self.k_re2, self.k_vc
k_tot = k_fe + k_re2 + k_vc
ix = np.tril_indices(self.k_re)
lin = []
for k in range(k_fe):
e = np.zeros(k_tot)
e[k] = 1
lin.append(e)
for k in range(k_re2):
lin.append(np.zeros(k_tot))
for k in range(k_vc):
lin.append(np.zeros(k_tot))
quad = []
# Quadratic terms for fixed effects.
for k in range(k_tot):
quad.append(np.zeros((k_tot, k_tot)))
# Quadratic terms for random effects covariance.
ii = np.tril_indices(k_re)
ix = [(a, b) for a, b in zip(ii[0], ii[1])]
for i1 in range(k_re2):
for i2 in range(k_re2):
ix1 = ix[i1]
ix2 = ix[i2]
if (ix1[1] == ix2[1]) and (ix1[0] <= ix2[0]):
ii = (ix2[0], ix1[0])
k = ix.index(ii)
quad[k_fe+k][k_fe+i2, k_fe+i1] += 1
for k in range(k_tot):
quad[k] = 0.5*(quad[k] + quad[k].T)
# Quadratic terms for variance components.
km = k_fe + k_re2
for k in range(km, km+k_vc):
quad[k][k, k] = 1
return lin, quad
def _expand_vcomp(self, vcomp, group_ix):
"""
Replicate variance parameters to match a group's design.
Parameters
----------
vcomp : array_like
The variance parameters for the variance components.
group_ix : int
The group index
Returns an expanded version of vcomp, in which each variance
parameter is copied as many times as there are independent
realizations of the variance component in the given group.
"""
if len(vcomp) == 0:
return np.empty(0)
vc_var = []
for j in range(len(self.exog_vc.names)):
d = self.exog_vc.mats[j][group_ix].shape[1]
vc_var.append(vcomp[j] * np.ones(d))
if len(vc_var) > 0:
return np.concatenate(vc_var)
else:
# Cannot reach here?
return np.empty(0)
def _augment_exog(self, group_ix):
"""
Concatenate the columns for variance components to the columns
for other random effects to obtain a single random effects
exog matrix for a given group.
"""
ex_r = self.exog_re_li[group_ix] if self.k_re > 0 else None
if self.k_vc == 0:
return ex_r
ex = [ex_r] if self.k_re > 0 else []
any_sparse = False
for j, _ in enumerate(self.exog_vc.names):
ex.append(self.exog_vc.mats[j][group_ix])
any_sparse |= sparse.issparse(ex[-1])
if any_sparse:
for j, x in enumerate(ex):
if not sparse.issparse(x):
ex[j] = sparse.csr_matrix(x)
ex = sparse.hstack(ex)
ex = sparse.csr_matrix(ex)
else:
ex = np.concatenate(ex, axis=1)
return ex
def loglike(self, params, profile_fe=True):
"""
Evaluate the (profile) log-likelihood of the linear mixed
effects model.
Parameters
----------
params : MixedLMParams, or array_like.
The parameter value. If array-like, must be a packed
parameter vector containing only the covariance
parameters.
profile_fe : bool
If True, replace the provided value of `fe_params` with
the GLS estimates.
Returns
-------
The log-likelihood value at `params`.
Notes
-----
The scale parameter `scale` is always profiled out of the
log-likelihood. In addition, if `profile_fe` is true the
fixed effects parameters are also profiled out.
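Examples
--------
A hypothetical sketch (``endog``, ``exog`` and ``groups`` as in the
class-level examples), evaluating the profile log-likelihood at the
fitted variance parameters:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit()
>>> ll = model.loglike(result.params_object, profile_fe=True)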
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
cov_re = params.cov_re
vcomp = params.vcomp
# Move to the profile set
if profile_fe:
fe_params, sing = self.get_fe_params(cov_re, vcomp)
if sing:
self._cov_sing += 1
else:
fe_params = params.fe_params
if self.k_re > 0:
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = np.linalg.pinv(cov_re)
self._cov_sing += 1
_, cov_re_logdet = np.linalg.slogdet(cov_re)
else:
cov_re_inv = np.zeros((0, 0))
cov_re_logdet = 0
# The residuals
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
likeval = 0.
# Handle the covariance penalty
if (self.cov_pen is not None) and (self.k_re > 0):
likeval -= self.cov_pen.func(cov_re, cov_re_inv)
# Handle the fixed effects penalty
if (self.fe_pen is not None):
likeval -= self.fe_pen.func(fe_params)
xvx, qf = 0., 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
cov_aug_logdet = cov_re_logdet + np.sum(np.log(vc_var))
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
resid = resid_all[self.row_indices[group]]
# Part 1 of the log likelihood (for both ML and REML)
ld = _smw_logdet(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var,
cov_aug_logdet)
likeval -= ld / 2.
# Part 2 of the log likelihood (for both ML and REML)
u = solver(resid)
qf += np.dot(resid, u)
# Adjustment for REML
if self.reml:
mat = solver(exog)
xvx += np.dot(exog.T, mat)
if self.reml:
likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.
_, ld = np.linalg.slogdet(xvx)
likeval -= ld / 2.
likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.
likeval += ((self.n_totobs - self.k_fe) *
np.log(self.n_totobs - self.k_fe) / 2.)
likeval -= (self.n_totobs - self.k_fe) / 2.
else:
likeval -= self.n_totobs * np.log(qf) / 2.
likeval -= self.n_totobs * np.log(2 * np.pi) / 2.
likeval += self.n_totobs * np.log(self.n_totobs) / 2.
likeval -= self.n_totobs / 2.
return likeval
def _gen_dV_dPar(self, ex_r, solver, group_ix, max_ix=None):
"""
A generator that yields the element-wise derivative of the
marginal covariance matrix with respect to the random effects
variance and covariance parameters.
ex_r : array_like
The random effects design matrix
solver : function
A function that given x returns V^{-1}x, where V
is the group's marginal covariance matrix.
group_ix : int
The group index
max_ix : {int, None}
If not None, the generator ends when this index
is reached.
"""
axr = solver(ex_r)
# Regular random effects
jj = 0
for j1 in range(self.k_re):
for j2 in range(j1 + 1):
if max_ix is not None and jj > max_ix:
return
# Need 2d
mat_l, mat_r = ex_r[:, j1:j1+1], ex_r[:, j2:j2+1]
vsl, vsr = axr[:, j1:j1+1], axr[:, j2:j2+1]
yield jj, mat_l, mat_r, vsl, vsr, j1 == j2
jj += 1
# Variance components
for j, _ in enumerate(self.exog_vc.names):
if max_ix is not None and jj > max_ix:
return
mat = self.exog_vc.mats[j][group_ix]
axmat = solver(mat)
yield jj, mat, mat, axmat, axmat, True
jj += 1
def score(self, params, profile_fe=True):
"""
Returns the score vector of the profile log-likelihood.
Notes
-----
The score vector that is returned is computed with respect to
the parameterization defined by this model instance's
`use_sqrt` attribute.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(
params, self.k_fe, self.k_re, self.use_sqrt,
has_fe=False)
if profile_fe:
params.fe_params, sing = \
self.get_fe_params(params.cov_re, params.vcomp)
if sing:
msg = "Random effects covariance is singular"
warnings.warn(msg)
if self.use_sqrt:
score_fe, score_re, score_vc = self.score_sqrt(
params, calc_fe=not profile_fe)
else:
score_fe, score_re, score_vc = self.score_full(
params, calc_fe=not profile_fe)
if self._freepat is not None:
score_fe *= self._freepat.fe_params
score_re *= self._freepat.cov_re[self._freepat._ix]
score_vc *= self._freepat.vcomp
if profile_fe:
return np.concatenate((score_re, score_vc))
else:
return np.concatenate((score_fe, score_re, score_vc))
def score_full(self, params, calc_fe):
"""
Returns the score with respect to untransformed parameters.
Calculates the score vector for the profiled log-likelihood of
the mixed effects model with respect to the parameterization
in which the random effects covariance matrix is represented
in its full form (not using the Cholesky factor).
Parameters
----------
params : MixedLMParams or array_like
The parameter at which the score function is evaluated.
If array-like, must contain the packed random effects
parameters (cov_re and vcomp) without fe_params.
calc_fe : bool
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array_like
The score vector with respect to the fixed effects
parameters.
score_re : array_like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array_like
The score vector with respect to variance components
parameters.
Notes
-----
`score_re` is taken with respect to the parameterization in
which `cov_re` is represented through its lower triangle
(without taking the Cholesky square root).
"""
fe_params = params.fe_params
cov_re = params.cov_re
vcomp = params.vcomp
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = np.linalg.pinv(cov_re)
self._cov_sing += 1
score_fe = np.zeros(self.k_fe)
score_re = np.zeros(self.k_re2)
score_vc = np.zeros(self.k_vc)
# Handle the covariance penalty.
if self.cov_pen is not None:
score_re -= self.cov_pen.deriv(cov_re, cov_re_inv)
# Handle the fixed effects penalty.
if calc_fe and (self.fe_pen is not None):
score_fe -= self.fe_pen.deriv(fe_params)
# resid' V^{-1} resid, summed over the groups (a scalar)
rvir = 0.
# exog' V^{-1} resid, summed over the groups (a k_fe
# dimensional vector)
xtvir = 0.
# exog' V^{_1} exog, summed over the groups (a k_fe x k_fe
# matrix)
xtvix = 0.
# exog' V^{-1} dV/dQ_jj V^{-1} exog, where Q_jj is the jj^th
# covariance parameter.
xtax = [0., ] * (self.k_re2 + self.k_vc)
# Temporary related to the gradient of log |V|
dlv = np.zeros(self.k_re2 + self.k_vc)
# resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)
rvavr = np.zeros(self.k_re2 + self.k_vc)
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
if self.reml:
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
# Contributions to the covariance parameter gradient
vir = solver(resid)
for (jj, matl, matr, vsl, vsr, sym) in\
self._gen_dV_dPar(ex_r, solver, group_ix):
dlv[jj] = _dotsum(matr, vsl)
if not sym:
dlv[jj] += _dotsum(matl, vsr)
ul = _dot(vir, matl)
ur = ul.T if sym else _dot(matr.T, vir)
ulr = np.dot(ul, ur)
rvavr[jj] += ulr
if not sym:
rvavr[jj] += ulr.T
if self.reml:
ul = _dot(viexog.T, matl)
ur = ul.T if sym else _dot(matr.T, viexog)
ulr = np.dot(ul, ur)
xtax[jj] += ulr
if not sym:
xtax[jj] += ulr.T
# Contribution of log|V| to the covariance parameter
# gradient.
if self.k_re > 0:
score_re -= 0.5 * dlv[0:self.k_re2]
if self.k_vc > 0:
score_vc -= 0.5 * dlv[self.k_re2:]
rvir += np.dot(resid, vir)
if calc_fe:
xtvir += np.dot(exog.T, vir)
fac = self.n_totobs
if self.reml:
fac -= self.k_fe
if calc_fe and self.k_fe > 0:
score_fe += fac * xtvir / rvir
if self.k_re > 0:
score_re += 0.5 * fac * rvavr[0:self.k_re2] / rvir
if self.k_vc > 0:
score_vc += 0.5 * fac * rvavr[self.k_re2:] / rvir
if self.reml:
xtvixi = np.linalg.inv(xtvix)
for j in range(self.k_re2):
score_re[j] += 0.5 * _dotsum(xtvixi.T, xtax[j])
for j in range(self.k_vc):
score_vc[j] += 0.5 * _dotsum(xtvixi.T, xtax[self.k_re2 + j])
return score_fe, score_re, score_vc
def score_sqrt(self, params, calc_fe=True):
"""
Returns the score with respect to transformed parameters.
Calculates the score vector with respect to the
parameterization in which the random effects covariance matrix
is represented through its Cholesky square root.
Parameters
----------
params : MixedLMParams or array_like
The model parameters. If array-like must contain packed
parameters that are compatible with this model instance.
calc_fe : bool
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array_like
The score vector with respect to the fixed effects
parameters.
score_re : array_like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array_like
The score vector with respect to variance components
parameters.
"""
score_fe, score_re, score_vc = self.score_full(params, calc_fe=calc_fe)
params_vec = params.get_packed(use_sqrt=True, has_fe=True)
score_full = np.concatenate((score_fe, score_re, score_vc))
scr = 0.
for i in range(len(params_vec)):
v = self._lin[i] + 2 * np.dot(self._quad[i], params_vec)
scr += score_full[i] * v
score_fe = scr[0:self.k_fe]
score_re = scr[self.k_fe:self.k_fe + self.k_re2]
score_vc = scr[self.k_fe + self.k_re2:]
return score_fe, score_re, score_vc
def hessian(self, params):
"""
Returns the model's Hessian matrix.
Calculates the Hessian matrix for the linear mixed effects
model with respect to the parameterization in which the
covariance matrix is represented directly (without square-root
transformation).
Parameters
----------
params : MixedLMParams or array_like
The model parameters at which the Hessian is calculated.
If array-like, must contain the packed parameters in a
form that is compatible with this model instance.
Returns
-------
hess : 2d ndarray
The Hessian matrix, evaluated at `params`.
sing : boolean
If True, the covariance matrix is singular and a
pseudo-inverse is returned.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe, self.k_re,
use_sqrt=self.use_sqrt,
has_fe=True)
fe_params = params.fe_params
vcomp = params.vcomp
cov_re = params.cov_re
sing = False
if self.k_re > 0:
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = np.linalg.pinv(cov_re)
sing = True
else:
cov_re_inv = np.empty((0, 0))
# Blocks for the fixed and random effects parameters.
hess_fe = 0.
hess_re = np.zeros((self.k_re2 + self.k_vc, self.k_re2 + self.k_vc))
hess_fere = np.zeros((self.k_re2 + self.k_vc, self.k_fe))
fac = self.n_totobs
if self.reml:
fac -= self.exog.shape[1]
rvir = 0.
xtvix = 0.
xtax = [0., ] * (self.k_re2 + self.k_vc)
m = self.k_re2 + self.k_vc
B = np.zeros(m)
D = np.zeros((m, m))
F = [[0.] * m for k in range(m)]
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
vc_vari = np.zeros_like(vc_var)
ii = np.flatnonzero(vc_var >= 1e-10)
if len(ii) > 0:
vc_vari[ii] = 1 / vc_var[ii]
if len(ii) < len(vc_var):
sing = True
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, vc_vari)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
vir = solver(resid)
rvir += np.dot(resid, vir)
for (jj1, matl1, matr1, vsl1, vsr1, sym1) in\
self._gen_dV_dPar(ex_r, solver, group_ix):
ul = _dot(viexog.T, matl1)
ur = _dot(matr1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if not sym1:
ul = _dot(viexog.T, matr1)
ur = _dot(matl1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if self.reml:
ul = _dot(viexog.T, matl1)
ur = ul if sym1 else np.dot(viexog.T, matr1)
ulr = _dot(ul, ur.T)
xtax[jj1] += ulr
if not sym1:
xtax[jj1] += ulr.T
ul = _dot(vir, matl1)
ur = ul if sym1 else _dot(vir, matr1)
B[jj1] += np.dot(ul, ur) * (1 if sym1 else 2)
# V^{-1} * dV/d_theta
E = [(vsl1, matr1)]
if not sym1:
E.append((vsr1, matl1))
for (jj2, matl2, matr2, vsl2, vsr2, sym2) in\
self._gen_dV_dPar(ex_r, solver, group_ix, jj1):
re = sum([_multi_dot_three(matr2.T, x[0], x[1].T)
for x in E])
vt = 2 * _dot(_multi_dot_three(vir[None, :], matl2, re),
vir[:, None])
if not sym2:
le = sum([_multi_dot_three(matl2.T, x[0], x[1].T)
for x in E])
vt += 2 * _dot(_multi_dot_three(
vir[None, :], matr2, le), vir[:, None])
D[jj1, jj2] += vt
if jj1 != jj2:
D[jj2, jj1] += vt
rt = _dotsum(vsl2, re.T) / 2
if not sym2:
rt += _dotsum(vsr2, le.T) / 2
hess_re[jj1, jj2] += rt
if jj1 != jj2:
hess_re[jj2, jj1] += rt
if self.reml:
ev = sum([_dot(x[0], _dot(x[1].T, viexog)) for x in E])
u1 = _dot(viexog.T, matl2)
u2 = _dot(matr2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
if not sym2:
u1 = np.dot(viexog.T, matr2)
u2 = np.dot(matl2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
hess_fe -= fac * xtvix / rvir
hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)
hess_fere = -fac * hess_fere / rvir
if self.reml:
QL = [np.linalg.solve(xtvix, x) for x in xtax]
for j1 in range(self.k_re2 + self.k_vc):
for j2 in range(j1 + 1):
a = _dotsum(QL[j1].T, QL[j2])
a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))
a *= 0.5
hess_re[j1, j2] += a
if j1 > j2:
hess_re[j2, j1] += a
# Put the blocks together to get the Hessian.
m = self.k_fe + self.k_re2 + self.k_vc
hess = np.zeros((m, m))
hess[0:self.k_fe, 0:self.k_fe] = hess_fe
hess[0:self.k_fe, self.k_fe:] = hess_fere.T
hess[self.k_fe:, 0:self.k_fe] = hess_fere
hess[self.k_fe:, self.k_fe:] = hess_re
return hess, sing
def get_scale(self, fe_params, cov_re, vcomp):
"""
Returns the estimated error variance based on given estimates
of the slopes and random effects covariance matrix.
Parameters
----------
fe_params : array_like
The regression slope estimates
cov_re : 2d array_like
Estimate of the random effects covariance matrix
vcomp : array_like
Estimate of the variance components
Returns
-------
scale : float
The estimated error variance.
"""
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = np.linalg.pinv(cov_re)
warnings.warn(_warn_cov_sing)
qf = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
mat = solver(resid)
qf += np.dot(resid, mat)
if self.reml:
qf /= (self.n_totobs - self.k_fe)
else:
qf /= self.n_totobs
return qf
def fit(self, start_params=None, reml=True, niter_sa=0,
do_cg=True, fe_pen=None, cov_pen=None, free=None,
full_output=False, method=None, **fit_kwargs):
"""
Fit a linear mixed model to the data.
Parameters
----------
start_params : array_like or MixedLMParams
Starting values for the profile log-likelihood. If not a
`MixedLMParams` instance, this should be an array
containing the packed parameters for the profile
log-likelihood, including the fixed effects
parameters.
reml : bool
If true, fit according to the REML likelihood, else
fit the standard likelihood using ML.
niter_sa : int
Currently this argument is ignored and has no effect
on the results.
cov_pen : CovariancePenalty object
A penalty for the random effects covariance matrix
do_cg : bool, defaults to True
If False, the optimization is skipped and a results
object at the given (or default) starting values is
returned.
fe_pen : Penalty object
A penalty on the fixed effects
free : MixedLMParams object
If not `None`, this is a mask that allows parameters to be
held fixed at specified values. A 1 indicates that the
corresponding parameter is estimated, a 0 indicates that
it is fixed at its starting value. Setting the `cov_re`
component to the identity matrix fits a model with
independent random effects. Note that some optimization
methods do not respect this constraint (bfgs and lbfgs both
work).
full_output : bool
If true, attach iteration history to results
method : str
Optimization method. Can be a scipy.optimize method name,
or a list of such names to be tried in sequence.
**fit_kwargs
Additional keyword arguments passed to fit.
Returns
-------
A MixedLMResults instance.
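Examples
--------
A hypothetical sketch (``endog``, ``exog`` and ``groups`` as in the
class-level examples), fitting by ML with the lbfgs optimizer:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit(reml=False, method="lbfgs")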
"""
_allowed_kwargs = ['gtol', 'maxiter', 'eps', 'maxcor', 'ftol',
'tol', 'disp', 'maxls']
for x in fit_kwargs.keys():
if x not in _allowed_kwargs:
warnings.warn("Argument %s not used by MixedLM.fit" % x)
if method is None:
method = ['bfgs', 'lbfgs', 'cg']
elif isinstance(method, str):
method = [method]
for meth in method:
if meth.lower() in ["newton", "ncg"]:
raise ValueError(
"method %s not available for MixedLM" % meth)
self.reml = reml
self.cov_pen = cov_pen
self.fe_pen = fe_pen
self._cov_sing = 0
self._freepat = free
if full_output:
hist = []
else:
hist = None
if start_params is None:
params = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
params.fe_params = np.zeros(self.k_fe)
params.cov_re = np.eye(self.k_re)
params.vcomp = np.ones(self.k_vc)
else:
if isinstance(start_params, MixedLMParams):
params = start_params
else:
# It's a packed array
if len(start_params) == self.k_fe + self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(
start_params, self.k_fe, self.k_re, self.use_sqrt,
has_fe=True)
elif len(start_params) == self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(
start_params, self.k_fe, self.k_re, self.use_sqrt,
has_fe=False)
else:
raise ValueError("invalid start_params")
if do_cg:
fit_kwargs["retall"] = hist is not None
if "disp" not in fit_kwargs:
fit_kwargs["disp"] = False
packed = params.get_packed(use_sqrt=self.use_sqrt, has_fe=False)
if niter_sa > 0:
warnings.warn("niter_sa is currently ignored")
# Try optimizing one or more times
for j in range(len(method)):
rslt = super(MixedLM, self).fit(start_params=packed,
skip_hessian=True,
method=method[j],
**fit_kwargs)
if rslt.mle_retvals['converged']:
break
packed = rslt.params
if j + 1 < len(method):
next_method = method[j + 1]
warnings.warn(
"Retrying MixedLM optimization with %s" % next_method,
ConvergenceWarning)
else:
msg = ("MixedLM optimization failed, " +
"trying a different optimizer may help.")
warnings.warn(msg, ConvergenceWarning)
# The optimization succeeded
params = np.atleast_1d(rslt.params)
if hist is not None:
hist.append(rslt.mle_retvals)
converged = rslt.mle_retvals['converged']
if not converged:
gn = self.score(rslt.params)
gn = np.sqrt(np.sum(gn**2))
msg = "Gradient optimization failed, |grad| = %f" % gn
warnings.warn(msg, ConvergenceWarning)
# Convert to the final parameterization (i.e. undo the square
# root transform of the covariance matrix, and the profiling
# over the error variance).
params = MixedLMParams.from_packed(
params, self.k_fe, self.k_re, use_sqrt=self.use_sqrt, has_fe=False)
cov_re_unscaled = params.cov_re
vcomp_unscaled = params.vcomp
fe_params, sing = self.get_fe_params(cov_re_unscaled, vcomp_unscaled)
params.fe_params = fe_params
scale = self.get_scale(fe_params, cov_re_unscaled, vcomp_unscaled)
cov_re = scale * cov_re_unscaled
vcomp = scale * vcomp_unscaled
f1 = (self.k_re > 0) and (np.min(np.abs(np.diag(cov_re))) < 0.01)
f2 = (self.k_vc > 0) and (np.min(np.abs(vcomp)) < 0.01)
if f1 or f2:
msg = "The MLE may be on the boundary of the parameter space."
warnings.warn(msg, ConvergenceWarning)
# Compute the Hessian at the MLE. Note that this is the
# Hessian with respect to the random effects covariance matrix
# (not its square root). It is used for obtaining standard
# errors, not for optimization.
hess, sing = self.hessian(params)
if sing:
warnings.warn(_warn_cov_sing)
hess_diag = np.diag(hess)
if free is not None:
pcov = np.zeros_like(hess)
pat = self._freepat.get_packed(use_sqrt=False, has_fe=True)
ii = np.flatnonzero(pat)
hess_diag = hess_diag[ii]
if len(ii) > 0:
hess1 = hess[np.ix_(ii, ii)]
pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)
else:
pcov = np.linalg.inv(-hess)
if np.any(hess_diag >= 0):
msg = ("The Hessian matrix at the estimated parameter values " +
"is not positive definite.")
warnings.warn(msg, ConvergenceWarning)
# Prepare a results class instance
params_packed = params.get_packed(use_sqrt=False, has_fe=True)
results = MixedLMResults(self, params_packed, pcov / scale)
results.params_object = params
results.fe_params = fe_params
results.cov_re = cov_re
results.vcomp = vcomp
results.scale = scale
results.cov_re_unscaled = cov_re_unscaled
results.method = "REML" if self.reml else "ML"
results.converged = converged
results.hist = hist
results.reml = self.reml
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
results.use_sqrt = self.use_sqrt
results.freepat = self._freepat
return MixedLMResultsWrapper(results)
def get_distribution(self, params, scale, exog):
return _mixedlm_distribution(self, params, scale, exog)
class _mixedlm_distribution:
"""
A private class for simulating data from a given mixed linear model.
Parameters
----------
model : MixedLM instance
A mixed linear model
params : array_like
A parameter vector defining a mixed linear model. See
notes for more information.
scale : scalar
The unexplained variance
exog : array_like
An array of fixed effect covariates. If None, model.exog
is used.
Notes
-----
The params array is a vector containing fixed effects parameters,
random effects parameters, and variance component parameters, in
that order. The lower triangle of the random effects covariance
matrix is stored. The random effects and variance components
parameters are divided by the scale parameter.
This class is used in Mediation, and possibly elsewhere.
"""
def __init__(self, model, params, scale, exog):
self.model = model
self.exog = exog if exog is not None else model.exog
po = MixedLMParams.from_packed(
params, model.k_fe, model.k_re, False, True)
self.fe_params = po.fe_params
self.cov_re = scale * po.cov_re
self.vcomp = scale * po.vcomp
self.scale = scale
group_idx = np.zeros(model.nobs, dtype=int)
for k, g in enumerate(model.group_labels):
group_idx[model.row_indices[g]] = k
self.group_idx = group_idx
def rvs(self, n):
"""
Return a vector of simulated values from a mixed linear
model.
The parameter n is ignored, but required by the interface.
"""
model = self.model
# Fixed effects
y = np.dot(self.exog, self.fe_params)
# Random effects
u = np.random.normal(size=(model.n_groups, model.k_re))
u = np.dot(u, np.linalg.cholesky(self.cov_re).T)
y += (u[self.group_idx, :] * model.exog_re).sum(1)
# Variance components
for j, _ in enumerate(model.exog_vc.names):
ex = model.exog_vc.mats[j]
v = self.vcomp[j]
for i, g in enumerate(model.group_labels):
exg = ex[i]
ii = model.row_indices[g]
u = np.random.normal(size=exg.shape[1])
y[ii] += np.sqrt(v) * np.dot(exg, u)
# Residual variance
y += np.sqrt(self.scale) * np.random.normal(size=len(y))
return y
class MixedLMResults(base.LikelihoodModelResults, base.ResultMixin):
'''
Class to contain results of fitting a linear mixed effects model.
MixedLMResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Attributes
----------
model : class instance
Pointer to MixedLM model instance that called fit.
normalized_cov_params : ndarray
The sampling covariance matrix of the estimates
params : ndarray
A packed parameter vector for the profile parameterization.
The first `k_fe` elements are the estimated fixed effects
coefficients. The remaining elements are the estimated
variance parameters. The variance parameters are all divided
by `scale` and are not the variance parameters shown
in the summary.
fe_params : ndarray
The fitted fixed-effects coefficients
cov_re : ndarray
The fitted random-effects covariance matrix
bse_fe : ndarray
The standard errors of the fitted fixed effects coefficients
bse_re : ndarray
The standard errors of the fitted random effects covariance
matrix and variance components. The first `k_re * (k_re + 1) / 2`
parameters are the standard errors for the lower triangle of
`cov_re`, the remaining elements are the standard errors for
the variance components.
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params):
super(MixedLMResults, self).__init__(model, params,
normalized_cov_params=cov_params)
self.nobs = self.model.nobs
self.df_resid = self.nobs - np.linalg.matrix_rank(self.model.exog)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values for the model.
The fitted values reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
fit = np.dot(self.model.exog, self.fe_params)
re = self.random_effects
for group_ix, group in enumerate(self.model.group_labels):
ix = self.model.row_indices[group]
mat = []
if self.model.exog_re_li is not None:
mat.append(self.model.exog_re_li[group_ix])
for j in range(self.k_vc):
mat.append(self.model.exog_vc.mats[j][group_ix])
mat = np.concatenate(mat, axis=1)
fit[ix] += np.dot(mat, re[group])
return fit
@cache_readonly
def resid(self):
"""
Returns the residuals for the model.
The residuals reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
return self.model.endog - self.fittedvalues
@cache_readonly
def bse_fe(self):
"""
Returns the standard errors of the fixed effect regression
coefficients.
"""
p = self.model.exog.shape[1]
return np.sqrt(np.diag(self.cov_params())[0:p])
@cache_readonly
def bse_re(self):
"""
Returns the standard errors of the variance parameters.
The first `k_re x (k_re + 1) / 2` elements of the returned array
are the standard errors of the lower triangle of `cov_re`.
The remaining elements are the standard errors of the variance
components.
Note that the sampling distribution of variance parameters is
strongly skewed unless the sample size is large, so these
standard errors may not give meaningful confidence intervals
or p-values if used in the usual way.
"""
p = self.model.exog.shape[1]
return np.sqrt(self.scale * np.diag(self.cov_params())[p:])
def _expand_re_names(self, group_ix):
names = list(self.model.data.exog_re_names)
for j, v in enumerate(self.model.exog_vc.names):
vg = self.model.exog_vc.colnames[j][group_ix]
na = ["%s[%s]" % (v, s) for s in vg]
names.extend(na)
return names
@cache_readonly
def random_effects(self):
"""
The conditional means of random effects given the data.
Returns
-------
random_effects : dict
A dictionary mapping the distinct `group` values to the
conditional means of the random effects for the group
given the data.
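Examples
--------
A hypothetical sketch; ``result`` is a fitted MixedLMResults
instance:
>>> re = result.random_effects
>>> b0 = re[result.model.group_labels[0]]  # Series for one group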
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
raise ValueError("Cannot predict random effects from " +
"singular covariance structure.")
vcomp = self.vcomp
k_re = self.k_re
ranef_dict = {}
for group_ix, group in enumerate(self.model.group_labels):
endog = self.model.endog_li[group_ix]
exog = self.model.exog_li[group_ix]
ex_r = self.model._aex_r[group_ix]
ex2_r = self.model._aex_r2[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group_ix)
# Get the residuals relative to fixed effects
resid = endog
if self.k_fe > 0:
expval = np.dot(exog, self.fe_params)
resid = resid - expval
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv,
1 / vc_var)
vir = solver(resid)
xtvir = _dot(ex_r.T, vir)
xtvir[0:k_re] = np.dot(self.cov_re, xtvir[0:k_re])
xtvir[k_re:] *= vc_var
ranef_dict[group] = pd.Series(
xtvir, index=self._expand_re_names(group_ix))
return ranef_dict
@cache_readonly
def random_effects_cov(self):
"""
Returns the conditional covariance matrix of the random
effects for each group given the data.
Returns
-------
random_effects_cov : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional covariance matrix of the
random effects given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
vcomp = self.vcomp
ranef_dict = {}
for group_ix in range(self.model.n_groups):
ex_r = self.model._aex_r[group_ix]
ex2_r = self.model._aex_r2[group_ix]
label = self.model.group_labels[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group_ix)
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv,
1 / vc_var)
n = ex_r.shape[0]
m = self.cov_re.shape[0]
mat1 = np.empty((n, m + len(vc_var)))
mat1[:, 0:m] = np.dot(ex_r[:, 0:m], self.cov_re)
mat1[:, m:] = np.dot(ex_r[:, m:], np.diag(vc_var))
mat2 = solver(mat1)
mat2 = np.dot(mat1.T, mat2)
v = -mat2
v[0:m, 0:m] += self.cov_re
ix = np.arange(m, v.shape[0])
v[ix, ix] += vc_var
na = self._expand_re_names(group_ix)
v = pd.DataFrame(v, index=na, columns=na)
ranef_dict[label] = v
return ranef_dict
# Need to override since t-tests are only used for fixed effects
# parameters.
def t_test(self, r_matrix, use_t=None):
"""
        Compute a t-test for each linear hypothesis of the form Rb = q
Parameters
----------
r_matrix : array_like
If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
"""
if r_matrix.shape[1] != self.k_fe:
raise ValueError("r_matrix for t-test should have %d columns"
% self.k_fe)
d = self.k_re2 + self.k_vc
z0 = np.zeros((r_matrix.shape[0], d))
r_matrix = np.concatenate((r_matrix, z0), axis=1)
tst_rslt = super(MixedLMResults, self).t_test(r_matrix, use_t=use_t)
return tst_rslt
def summary(self, yname=None, xname_fe=None, xname_re=None,
title=None, alpha=.05):
"""
Summarize the mixed model regression results.
Parameters
----------
yname : str, optional
Default is `y`
xname_fe : list[str], optional
Fixed effects covariate names
xname_re : list[str], optional
Random effects covariate names
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
info = {}
info["Model:"] = "MixedLM"
if yname is None:
yname = self.model.endog_names
param_names = self.model.data.param_names[:]
k_fe_params = len(self.fe_params)
k_re_params = len(param_names) - len(self.fe_params)
if xname_fe is not None:
if len(xname_fe) != k_fe_params:
msg = "xname_fe should be a list of length %d" % k_fe_params
raise ValueError(msg)
param_names[:k_fe_params] = xname_fe
if xname_re is not None:
if len(xname_re) != k_re_params:
msg = "xname_re should be a list of length %d" % k_re_params
raise ValueError(msg)
param_names[k_fe_params:] = xname_re
info["No. Observations:"] = str(self.model.n_totobs)
info["No. Groups:"] = str(self.model.n_groups)
gs = np.array([len(x) for x in self.model.endog_li])
info["Min. group size:"] = "%.0f" % min(gs)
info["Max. group size:"] = "%.0f" % max(gs)
info["Mean group size:"] = "%.1f" % np.mean(gs)
info["Dependent Variable:"] = yname
info["Method:"] = self.method
info["Scale:"] = self.scale
info["Log-Likelihood:"] = self.llf
info["Converged:"] = "Yes" if self.converged else "No"
smry.add_dict(info)
smry.add_title("Mixed Linear Model Regression Results")
float_fmt = "%.3f"
sdf = np.nan * np.ones((self.k_fe + self.k_re2 + self.k_vc, 6))
# Coefficient estimates
sdf[0:self.k_fe, 0] = self.fe_params
# Standard errors
sdf[0:self.k_fe, 1] = np.sqrt(np.diag(self.cov_params()[0:self.k_fe]))
# Z-scores
sdf[0:self.k_fe, 2] = sdf[0:self.k_fe, 0] / sdf[0:self.k_fe, 1]
# p-values
sdf[0:self.k_fe, 3] = 2 * norm.cdf(-np.abs(sdf[0:self.k_fe, 2]))
# Confidence intervals
qm = -norm.ppf(alpha / 2)
sdf[0:self.k_fe, 4] = sdf[0:self.k_fe, 0] - qm * sdf[0:self.k_fe, 1]
sdf[0:self.k_fe, 5] = sdf[0:self.k_fe, 0] + qm * sdf[0:self.k_fe, 1]
# All random effects variances and covariances
jj = self.k_fe
for i in range(self.k_re):
for j in range(i + 1):
sdf[jj, 0] = self.cov_re[i, j]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
# Variance components
for i in range(self.k_vc):
sdf[jj, 0] = self.vcomp[i]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
sdf = pd.DataFrame(index=param_names, data=sdf)
sdf.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
for col in sdf.columns:
sdf[col] = [float_fmt % x if np.isfinite(x) else ""
for x in sdf[col]]
smry.add_df(sdf, align='r')
return smry
@cache_readonly
def llf(self):
return self.model.loglike(self.params_object, profile_fe=False)
@cache_readonly
def aic(self):
"""Akaike information criterion"""
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * (self.llf - df)
@cache_readonly
def bic(self):
"""Bayesian information criterion"""
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * self.llf + np.log(self.nobs) * df
def profile_re(self, re_ix, vtype, num_low=5, dist_low=1., num_high=5,
dist_high=1., **fit_kwargs):
"""
Profile-likelihood inference for variance parameters.
Parameters
----------
re_ix : int
If vtype is `re`, this value is the index of the variance
parameter for which to construct a profile likelihood. If
`vtype` is 'vc' then `re_ix` is the name of the variance
parameter to be profiled.
vtype : str
Either 're' or 'vc', depending on whether the profile
analysis is for a random effect or a variance component.
num_low : int
The number of points at which to calculate the likelihood
below the MLE of the parameter of interest.
dist_low : float
The distance below the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
num_high : int
The number of points at which to calculate the likelihood
above the MLE of the parameter of interest.
dist_high : float
The distance above the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
**fit_kwargs
Additional keyword arguments passed to fit.
Returns
-------
An array with two columns. The first column contains the
values to which the parameter of interest is constrained. The
second column contains the corresponding likelihood values.
Notes
-----
Only variance parameters can be profiled.
"""
pmodel = self.model
k_fe = pmodel.k_fe
k_re = pmodel.k_re
k_vc = pmodel.k_vc
endog, exog = pmodel.endog, pmodel.exog
# Need to permute the columns of the random effects design
# matrix so that the profiled variable is in the first column.
if vtype == 're':
ix = np.arange(k_re)
ix[0] = re_ix
ix[re_ix] = 0
exog_re = pmodel.exog_re.copy()[:, ix]
# Permute the covariance structure to match the permuted
# design matrix.
params = self.params_object.copy()
cov_re_unscaled = params.cov_re
cov_re_unscaled = cov_re_unscaled[np.ix_(ix, ix)]
params.cov_re = cov_re_unscaled
ru0 = cov_re_unscaled[0, 0]
# Convert dist_low and dist_high to the profile
# parameterization
cov_re = self.scale * cov_re_unscaled
low = (cov_re[0, 0] - dist_low) / self.scale
high = (cov_re[0, 0] + dist_high) / self.scale
elif vtype == 'vc':
re_ix = self.model.exog_vc.names.index(re_ix)
params = self.params_object.copy()
vcomp = self.vcomp
low = (vcomp[re_ix] - dist_low) / self.scale
high = (vcomp[re_ix] + dist_high) / self.scale
ru0 = vcomp[re_ix] / self.scale
# Define the sequence of values to which the parameter of
# interest will be constrained.
if low <= 0:
raise ValueError("dist_low is too large and would result in a "
"negative variance. Try a smaller value.")
left = np.linspace(low, ru0, num_low + 1)
right = np.linspace(ru0, high, num_high+1)[1:]
rvalues = np.concatenate((left, right))
# Indicators of which parameters are free and fixed.
free = MixedLMParams(k_fe, k_re, k_vc)
if self.freepat is None:
free.fe_params = np.ones(k_fe)
vcomp = np.ones(k_vc)
mat = np.ones((k_re, k_re))
else:
# If a freepat already has been specified, we add the
# constraint to it.
free.fe_params = self.freepat.fe_params
vcomp = self.freepat.vcomp
mat = self.freepat.cov_re
if vtype == 're':
mat = mat[np.ix_(ix, ix)]
if vtype == 're':
mat[0, 0] = 0
else:
vcomp[re_ix] = 0
free.cov_re = mat
free.vcomp = vcomp
klass = self.model.__class__
init_kwargs = pmodel._get_init_kwds()
if vtype == 're':
init_kwargs['exog_re'] = exog_re
likev = []
for x in rvalues:
model = klass(endog, exog, **init_kwargs)
if vtype == 're':
cov_re = params.cov_re.copy()
cov_re[0, 0] = x
params.cov_re = cov_re
else:
params.vcomp[re_ix] = x
# TODO should use fit_kwargs
rslt = model.fit(start_params=params, free=free,
reml=self.reml, cov_pen=self.cov_pen,
**fit_kwargs)._results
likev.append([x * rslt.scale, rslt.llf])
likev = np.asarray(likev)
return likev
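# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of statsmodels): how the results
# accessors defined above (summary, bse_fe/bse_re, random_effects, profile_re)
# are typically used after fitting a MixedLM. The simulated data, column names
# and profiling distances below are arbitrary choices for the example.
# ---------------------------------------------------------------------------
def _mixedlm_usage_sketch():
    import numpy as np
    import pandas as pd
    import statsmodels.formula.api as smf

    rng = np.random.default_rng(0)
    n_groups, n_per = 30, 10
    g = np.repeat(np.arange(n_groups), n_per)
    x = rng.normal(size=n_groups * n_per)
    u = rng.normal(scale=0.7, size=n_groups)[g]        # group random intercepts
    y = 1.0 + 2.0 * x + u + rng.normal(size=n_groups * n_per)
    frame = pd.DataFrame({"y": y, "x": x, "g": g})

    result = smf.mixedlm("y ~ x", frame, groups=frame["g"]).fit()
    print(result.summary())           # fixed effects and variance component table
    print(result.bse_fe)              # standard errors of the fixed effects
    print(result.bse_re)              # standard errors of the variance parameters
    print(result.random_effects[0])   # conditional mean of group 0's random effect

    # Profile likelihood for the single random-intercept variance parameter.
    prof = result.profile_re(0, vtype="re", num_low=3, dist_low=0.1,
                             num_high=3, dist_high=0.1)
    print(prof)                       # column 0: constrained value, column 1: log-likelihood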
class MixedLMResultsWrapper(base.LikelihoodResultsWrapper):
_attrs = {'bse_re': ('generic_columns', 'exog_re_names_full'),
'fe_params': ('generic_columns', 'xnames'),
'bse_fe': ('generic_columns', 'xnames'),
'cov_re': ('generic_columns_2d', 'exog_re_names'),
'cov_re_unscaled': ('generic_columns_2d', 'exog_re_names'),
}
_upstream_attrs = base.LikelihoodResultsWrapper._wrap_attrs
_wrap_attrs = base.wrap.union_dicts(_attrs, _upstream_attrs)
_methods = {}
_upstream_methods = base.LikelihoodResultsWrapper._wrap_methods
_wrap_methods = base.wrap.union_dicts(_methods, _upstream_methods)
def _handle_missing(data, groups, formula, re_formula, vc_formula):
tokens = set()
forms = [formula]
if re_formula is not None:
forms.append(re_formula)
if vc_formula is not None:
forms.extend(vc_formula.values())
from statsmodels.compat.python import asunicode
from io import StringIO
import tokenize
skiptoks = {"(", ")", "*", ":", "+", "-", "**", "/"}
for fml in forms:
        # Unicode conversion is for Py2 compatibility
rl = StringIO(fml)
def rlu():
line = rl.readline()
return asunicode(line, 'ascii')
g = tokenize.generate_tokens(rlu)
for tok in g:
if tok not in skiptoks:
tokens.add(tok.string)
tokens = sorted(tokens & set(data.columns))
data = data[tokens]
ii = pd.notnull(data).all(1)
    if not isinstance(groups, str):
        ii &= pd.notnull(groups)
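# ---------------------------------------------------------------------------
# Minimal standalone sketch of the token-extraction idea in _handle_missing
# above: pull candidate variable names out of formula strings with the stdlib
# tokenize module, then keep only names that are actual columns. Python 3
# only; the asunicode shim above is a Py2 leftover and is not needed here.
# ---------------------------------------------------------------------------
def _formula_variables_sketch():
    import tokenize
    from io import StringIO

    import pandas as pd

    data = pd.DataFrame({"y": [1.0, 2.0], "x1": [0.1, 0.2], "x2": [3.0, 4.0]})
    skiptoks = {"(", ")", "*", ":", "+", "-", "**", "/", "~"}
    tokens = set()
    for fml in ["y ~ x1 + x2", "0 + x1"]:
        for tok in tokenize.generate_tokens(StringIO(fml).readline):
            if tok.string not in skiptoks:
                tokens.add(tok.string)
    return sorted(tokens & set(data.columns))   # ['x1', 'x2', 'y']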
import pandas as pd
import numpy as np
import time
import os
import datetime
import math
import unicodedata
import requests
def normalize_str(s):
""" Function for name normalization (handle áéíóú). """
return unicodedata.normalize("NFKD", s).encode("ascii","ignore").decode("ascii").upper()
FILES_TO_DOWNLOAD = {
'Argentina_Provinces.csv': 'https://raw.githubusercontent.com/mariano22/argcovidapi/master/csvs/Argentina_Provinces.csv',
'SantaFe_AllData.csv': 'https://raw.githubusercontent.com/mariano22/argcovidapi/master/csvs/SantaFe_AllData.csv',
}
DATA_DIR = './data/'
def _download_file(url, out_file):
response = requests.get(url)
assert response.status_code == 200,\
'Wrong status code at dowloading {}'.format(out_file)
f = open(out_file, "wb")
f.write(response.content)
f.close()
def _download_expired_data():
for csv_fn, csv_remote_fp in FILES_TO_DOWNLOAD.items():
csv_fp = os.path.join(DATA_DIR, csv_fn)
if (not os.path.isfile(csv_fp)) or (time.time()-os.stat(csv_fp).st_mtime>30*60):
print('Downloading',csv_fn)
_download_file(csv_remote_fp, csv_fp)
def _load_National_data(csv_fp):
df_arg = pd.read_csv(csv_fp)
df_arg['LOCATION'] = 'ARGENTINA/' + df_arg['PROVINCIA']
df_arg = df_arg.drop(columns=['PROVINCIA'])
df_arg = df_arg.set_index(['TYPE','LOCATION'])
df_arg = df_arg.rename(columns=lambda colname: pd.to_datetime(colname,format='%d/%m').replace(year=2020))
total_arg = df_arg.groupby(level=[0]).sum()
total_arg['LOCATION']='ARGENTINA'
total_arg = total_arg.reset_index().set_index(['TYPE','LOCATION'])
df_arg = pd.concat([df_arg,total_arg]).sort_index()
df_arg = df_arg[df_arg.columns[:-1]]
return df_arg
def _set_location_safe(row):
location_prefix = 'ARGENTINA/SANTA FE'
if row['DEPARTMENT']=='##TOTAL':
return location_prefix
location_prefix += '/'+row['DEPARTMENT'][3:]
if row['PLACE'].startswith('#'):
return location_prefix
return location_prefix +'/'+ row['PLACE']
def _load_SantaFe_data(csv_fp):
df_safe = pd.read_csv(csv_fp)
df_safe['LOCATION'] = df_safe.apply(_set_location_safe, axis=1)
df_safe = df_safe[ (df_safe['TYPE']=='CONFIRMADOS') & (df_safe['DEPARTMENT']!='##TOTAL') ]
df_safe['LOCATION'] = df_safe['LOCATION'].replace({
'ARGENTINA/SANTA FE/IRIONDO/CLASSON':'ARGENTINA/SANTA FE/IRIONDO/CLASON',
'ARGENTINA/SANTA FE/ROSARIO/VILLA GOB. GALVEZ':'ARGENTINA/SANTA FE/ROSARIO/VILLA GOBERNADOR GALVEZ',
'ARGENTINA/SANTA FE/SAN LORENZO/PUERTO GRAL. SAN MARTIN': 'ARGENTINA/SANTA FE/SAN LORENZO/PUERTO GENERAL SAN MARTIN',
})
df_safe = df_safe.drop(columns=['DEPARTMENT', 'PLACE'])
df_safe = df_safe.set_index(['TYPE','LOCATION'])
df_safe = df_safe.rename(columns=lambda colname: pd.to_datetime(colname,format='%d/%m/%Y'))
return df_safe
def _load_data_time_series(df_geoinfo):
df_arg = _load_National_data(os.path.join(DATA_DIR, 'Argentina_Provinces.csv'))
df_safe = _load_SantaFe_data(os.path.join(DATA_DIR, 'SantaFe_AllData.csv'))
df = pd.concat([df_arg,df_safe])
# Non described dates are 0's
df = df.fillna(0).sort_index()
# Set day 0 (prior any date) with all 0's
day_zero = df.columns[0]-pd.Timedelta(days=1)
df[day_zero]=0
df = df[df.columns.sort_values()]
# Add per capita fields
df_per_capita = pd.merge((df*10000).reset_index(),df_geoinfo[['LOCATION','POPULATION']],on='LOCATION',how='left')
df_per_capita = df_per_capita.fillna(math.inf).set_index(['TYPE','LOCATION'])
df_per_capita = df_per_capita.div(df_per_capita['POPULATION'], axis=0)
df_per_capita = df_per_capita.drop(columns=['POPULATION'])
df_per_capita.index = df_per_capita.index.map(lambda x : (x[0]+'_PER100K',x[1]) )
df = pd.concat([df,df_per_capita]).sort_index()
    # Calculate the number of affected subregions
are_confirmados = df.loc['CONFIRMADOS']>0
are_confirmados['PARENT_LOCATION'] = are_confirmados.index.map(lambda l : os.path.dirname(l))
affected_subregions = are_confirmados.groupby('PARENT_LOCATION').sum()
affected_subregions = affected_subregions.reset_index().rename(columns={'PARENT_LOCATION':'LOCATION'})
affected_subregions = affected_subregions[ affected_subregions['LOCATION']!='' ]
affected_subregions['TYPE']='AFFECTED_SUBREGIONS'
affected_subregions = affected_subregions.set_index(['TYPE','LOCATION'])
df = pd.concat([df,affected_subregions]).sort_index()
    # Calculate difference and difference ratio relative to the previous day
    df_shift = df.shift(axis=1).fillna(0)
    df_diff = df-df_shift
    df_diff.index = df_diff.index.map(lambda x : (x[0]+'_DIFF',x[1]) )
    df_diff_ratio = ((df-df_shift)/df_shift).fillna(0)
    df_diff_ratio.index = df_diff_ratio.index.map(lambda x : (x[0]+'_DIFF_RATIO',x[1]) )
    df = pd.concat([df,df_diff,df_diff_ratio,affected_subregions])
    # Drop nonsense columns
nonsense_columns = [ 'ACTIVOS_PER100K_DIFF_RATIO',
'AFFECTED_SUBREGIONS_DIFF_RATIO',
'CONFIRMADOS_PER100K_DIFF_RATIO',
'MUERTOS_PER100K_DIFF_RATIO',
'RECUPERADOS_PER100K_DIFF_RATIO' ]
df = df[df.index.map(lambda i : i[0] not in nonsense_columns)]
return df
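# ---------------------------------------------------------------------------
# Standalone hedged sketch of the shift/diff pattern used in
# _load_data_time_series above: day-over-day differences and growth ratios are
# obtained by shifting the date columns one step to the right. The numbers
# below are made up purely for illustration.
# ---------------------------------------------------------------------------
def _diff_ratio_sketch():
    dates = pd.date_range('2020-03-01', periods=4)
    df = pd.DataFrame([[0, 1, 3, 6]], index=['ARGENTINA'], columns=dates)
    df_shift = df.shift(axis=1).fillna(0)
    df_diff = df - df_shift                            # new cases per day: 0, 1, 2, 3
    df_ratio = ((df - df_shift) / df_shift).fillna(0)  # growth vs previous day: 0.0, inf, 2.0, 1.0
    return df_diff, df_ratio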
def _time_series_melt(df_time_series, df_geoinfo):
df = pd.melt(df_time_series, id_vars=['TYPE','LOCATION'], value_vars=df_time_series.columns[2:], var_name='date')
df = df.pivot_table(index=['LOCATION','date'], columns='TYPE', values='value').reset_index()
df = pd.merge(df,df_geoinfo,on='LOCATION',how='left')
return df
def _only_povs(df):
df = df[ df['LOCATION'].apply(lambda l : l.count('/')==1) ].copy()
df['LOCATION'] = df['LOCATION'].apply(lambda l : l[10:])
return df
def _soon_deprecated_data(df_time_series, df_info):
df_time_series=_only_povs(df_time_series)
df_info=_only_povs(df_info)
df_time_series['2020-03-02 00:00:00']=0.0
    df = pd.melt(df_time_series, id_vars=['TYPE','LOCATION'], value_vars=df_time_series.columns[2:], var_name='date')
import sqlite3
import pandas as pd
import sqlalchemy
from matplotlib.pyplot import hist
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
from datetime import datetime
import webbrowser
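# ---------------------------------------------------------------------------
# Hedged helper extracting the report pattern used inside reps() below: render
# a DataFrame as a matplotlib table and save it to a PDF. The file name and
# title defaults are arbitrary example values, not taken from the application.
# Usage (sketch): dataframe_to_pdf(df2, 'Sales_Report.pdf', 'Sales Report')
# ---------------------------------------------------------------------------
def dataframe_to_pdf(df, out_file='Report.pdf', title='Report'):
    fig, ax = plt.subplots(1, 1)
    fig.patch.set_visible(False)
    ax.axis('off')
    ax.axis('tight')
    ax.set_title(title, loc='right')
    ax.table(cellText=df.values, rowLabels=df.index,
             colLabels=df.columns, loc='center')
    fig.tight_layout()
    fig.savefig(out_file)
    plt.close(fig)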
def reps():
try:
engine = sqlalchemy.create_engine('sqlite:///ecom.db')
#Sales Report
df = pd.read_sql('SELECT desc as ITEM, price as "Unit Price", tag as Category, qty as Quantity, strftime("%Y %m %d", date) as "Sale Date" FROM cart WHERE status = "C" ORDER BY date DESC', engine)
df2 = df.set_index('ITEM')
df2["Line Total"] = (df2["Unit Price"]*df2["Quantity"]).round(1)
df2.loc['Total'] = df2.sum(numeric_only=True)
df2.fillna('', inplace=True)
df6 = pd.DataFrame(df2)
fig, ax =plt.subplots(1,1)
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
date = str(datetime.date(datetime.now()))
ax.set_title('Sales Report'+'\nDate:'+ date, loc='right')
table = ax.table(cellText=df6.values, rowLabels=df6.index, colLabels=df6.columns, loc='center')
w, h = table[0,1].get_width(), table[0,1].get_height()
table.add_cell(0, -1, w,h, text=df6.index.name, loc='center')
fig.tight_layout()
fig=ax.get_figure()
fig.savefig('Sales_Report.pdf')
#Top Sales Report
df3 = pd.read_sql('SELECT desc as ITEM, price as "Unit Price", tag as Category, qty as Quantity, strftime("%Y %m %d", date) as "Sale Date" FROM cart WHERE status = "C" ORDER BY Quantity DESC', engine)
df4 = df3.head()
fig, ax =plt.subplots()
pp = ax.hist(df4.ITEM, weights=df4.Quantity)
ax.set_title("Top Sales")
ax.set_xlabel('ITEMS')
ax.set_ylabel('Quantity')
print(pp)
fig=ax.get_figure()
fig.savefig('Top_Sales.pdf')
#Open Orders Report
        df8 = pd.read_sql('SELECT desc as ITEM, price as "Unit Price", tag as Category, qty as Quantity, strftime("%Y %m %d", date) as "Sale Date" FROM cart WHERE status = "O" ORDER BY date DESC', engine)
import pandas as pd
import numpy as np
print(pd.options.display.max_rows) #by default it is 60
pd.options.display.max_rows = 5
print(pd.options.display.max_rows) #Now it is 5
df = pd.read_csv('/media/nahid/New Volume/GitHub/Pandas/sample.csv')
print(df)
'''
company numEmps category ... state fundedDate raisedAmt
0 LifeLock NaN web ... AZ 1-May-07 6850000
1 LifeLock NaN web ... AZ 1-Oct-06 6000000
.. ... ... ... ... ... ... ...
97 MeeVee NaN web ... CA 1-Feb-06 6500000
98 MeeVee NaN web ... CA 1-Aug-06 8000000
'''
pd.options.display.max_rows = 100
print(df) #print successfully all the rows in sample.csv data file
#We can also use pd.get_option/pd.set_option instead of assigning pd.options.display.max_rows directly
#*************pd.get_option(arg), set_option(arg,val)*******************
a = pd.get_option("display.max_rows")
print(a) #100
pd.set_option("display.max_rows", 20)
a = pd.get_option("display.max_rows")
print(a) #20
#*******************Example********************************************
df = pd.DataFrame(np.random.randn(10,5))
print(df)
print(df.shape)
pd.set_option("max_rows",5)
print(df)
'''
0 1 2 3 4
0 -0.957296 0.779242 -1.625559 2.116592 -0.269248
1 0.109035 -0.003971 -0.746726 -1.271288 -0.643878
.. ... ... ... ... ...
8 -0.550164 0.972242 2.426685 0.408818 -0.136869
9 -0.472941 -0.624765 0.228406 -0.368229 0.101187
'''
#Here we see only 4 data rows: the truncated display splits them evenly between the head and the tail
#***************Stretching the DataFrame across pages*****************
df = pd.DataFrame(np.random.randn(5,12))
print(df)
'''
0 1 2 ... 9 10 11
0 0.017573 0.533890 -1.039920 ... 1.055588 0.230688 -1.185961
1 0.994916 1.730381 -0.265662 ... -0.637594 -0.468516 -1.197159
2 -0.470152 -0.702236 -0.249671 ... 0.956581 -1.167124 -0.775214
3 -0.113243 0.110624 0.822606 ... 1.375379 -0.564423 0.292864
4 -0.681015 -0.001743 0.170444 ... 0.387591 -0.009591 -0.263648
'''
'''display.expand_frame_repr allows the representation of DataFrames to stretch
across pages, wrapped over the full set of columns instead of being printed row-wise'''
pd.set_option("expand_frame_repr", True)
pd.options.display.expand_frame_repr = 15
print(df)
#https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.set_option.html
pd.set_option('max_colwidth',6)
dict = {"Name":pd.Series(["<NAME>", "Rafi", "Meem"]),
"Age":pd.Series([21,22,21]),
"Weight":pd.Series([48,75,76]),
"Height": | pd.Series([5.3, 5.8, 5.6]) | pandas.Series |
# TODO move away from this test generator style, since we need to manage the generator file,
# which is no longer in this project workspace, as well as the output test file.
## ##
# #
# THIS TEST WAS AUTOGENERATED BY groupby_test_generator.py #
# #
##
# TODO refactor this into table-driven tests using pytest.mark.parametrize, since each test body follows the same structure
# and a single test body with multiple test table entries will be more readable and flexible (see the sketch after the imports below).
from .groupby_unit_test_parameters import *
import pandas as pd
import riptable as rt
import unittest
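# ---------------------------------------------------------------------------
# Hedged sketch of the table-driven refactor suggested in the TODO above: one
# body parametrized over (val_columns, key_columns, symb_ratio, aggs) replaces
# the generated per-combination methods. Assumes the groupby_everything helper
# and the KEY/VAL column-name constants star-imported above. Deliberately not
# prefixed with `test_` so it is not collected alongside the generated suite.
# ---------------------------------------------------------------------------
import pytest
@pytest.mark.parametrize(
    "val_columns, key_columns, symb_ratio, aggs",
    [
        (1, 1, 0.1, ['median']),
        (5, 2, 0.1, ['median', 'min']),
        (7, 3, 0.30, ['var', 'mean', 'max']),
    ],
)
def _sketch_groupby_agg_matches_pandas(val_columns, key_columns, symb_ratio, aggs):
    test_class = groupby_everything(val_columns, key_columns, symb_ratio, aggs)
    pd_out = (
        pd.DataFrame(test_class.data)
        .groupby(KEY_COLUMN_NAMES[:key_columns])
        .agg(test_class.aggregation_functions)
    )
    rt_out = (
        rt.Dataset(test_class.data)
        .groupby(KEY_COLUMN_NAMES[:key_columns])
        .agg(test_class.aggregation_functions)
    )
    for func in aggs:
        for column in VAL_COLUMN_NAMES[:val_columns]:
            for a, b in zip(pd_out[column][func], rt_out[func.title()][column]):
                if a == a and b == b:          # skip NaN pairs, as safe_assert does
                    assert abs(a - b) < 1e-7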
class autogenerated_gb_tests(unittest.TestCase):
def safe_assert(self, ary1, ary2):
for a, b in zip(ary1, ary2):
if a == a and b == b:
self.assertAlmostEqual(a, b, places=7)
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(1, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(4, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(7, 1, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(2, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(5, 2, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(1, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(4, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(7, 3, 0.1, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(2, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median']
test_class = groupby_everything(5, 1, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(1, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(4, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median']
test_class = groupby_everything(7, 2, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(2, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median']
test_class = groupby_everything(5, 3, 0.30, ['median'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 1, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 2, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 3, 0.1, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_1(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 1, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_1__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(1, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_4__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(4, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_7__nkeycols_2(self):
aggs = ['median', 'min']
test_class = groupby_everything(7, 2, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_2__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(2, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_median_min__symb_ratio_0300__nvalcols_5__nkeycols_3(self):
aggs = ['median', 'min']
test_class = groupby_everything(5, 3, 0.30, ['median', 'min'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 1, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_5__nkeycols_2(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 2, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_1__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_4__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_01__nvalcols_7__nkeycols_3(self):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 3, 0.1, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 1, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(1, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(4, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(7, 2, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(2, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_mean_max__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'mean', 'max']
test_class = groupby_everything(5, 3, 0.30, ['var', 'mean', 'max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 1, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_2__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_5__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 2, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_1__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_4__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_01__nvalcols_7__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 3, 0.1, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_1(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 1, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_1__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(1, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_4__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(4, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_7__nkeycols_2(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(7, 2, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_2__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(2, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_var_max_sum_mean__symb_ratio_0300__nvalcols_5__nkeycols_3(
self,
):
aggs = ['var', 'max', 'sum', 'mean']
test_class = groupby_everything(5, 3, 0.30, ['var', 'max', 'sum', 'mean'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_1__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(1, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_4__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(4, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_7__nkeycols_1(self):
aggs = ['max']
test_class = groupby_everything(7, 1, 0.1, ['max'])
pd_out = (
pd.DataFrame(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
sf_out = (
rt.Dataset(test_class.data)
.groupby(KEY_COLUMN_NAMES[: test_class.key_columns])
.agg(test_class.aggregation_functions)
)
for func in aggs:
for i in range(0, test_class.val_columns):
column = VAL_COLUMN_NAMES[i]
self.safe_assert(pd_out[column][func], sf_out[func.title()][column])
def test_multikey___aggs_max__symb_ratio_01__nvalcols_2__nkeycols_2(self):
aggs = ['max']
test_class = groupby_everything(2, 2, 0.1, ['max'])
pd_out = (
            pd.DataFrame(test_class.data)
from multiprocessing.sharedctypes import Value
import pytest
import re
import numpy as np
import pandas as pd
from formulae.matrices import (
design_matrices,
ResponseVector,
CommonEffectsMatrix,
GroupEffectsMatrix,
)
from formulae.parser import ParseError
from formulae.resolver import ResolverError
@pytest.fixture(scope="module")
def data():
rng = np.random.default_rng(1234)
size = 20
data = pd.DataFrame(
{
"y": rng.uniform(size=size),
"x1": rng.uniform(size=size),
"x2": rng.uniform(size=size),
"x3": [1, 2, 3, 4] * 5,
"f": rng.choice(["A", "B"], size=size),
"g": rng.choice(["A", "B"], size=size),
"h": rng.choice(["A", "B"], size=size),
"j": rng.choice(["A", "B"], size=size),
}
)
data["g"] = pd.Categorical(data["g"], categories=["A", "B"], ordered=True)
return data
@pytest.fixture(scope="module")
def pixel():
"""
X-ray pixel intensities over time dataset from R nlme package.
The output is a subset of this dataframe.
"""
from os.path import dirname, join
data_dir = join(dirname(__file__), "data")
data = pd.read_csv(join(data_dir, "Pixel.csv"))
data["Dog"] = data["Dog"].astype("category")
data["day"] = data["day"].astype("category")
data = data[data["Dog"].isin([1, 2, 3])]
data = data[data["day"].isin([2, 4, 6])]
data = data.sort_values(["Dog", "Side", "day"])
data = data.reset_index(drop=True)
return data
@pytest.fixture(scope="module")
def beetle():
data = pd.DataFrame(
{
"x": np.array([1.6907, 1.7242, 1.7552, 1.7842, 1.8113, 1.8369, 1.8610, 1.8839]),
"n": np.array([59, 60, 62, 56, 63, 59, 62, 60]),
"y": np.array([6, 13, 18, 28, 52, 53, 61, 60]),
}
)
return data
def compare_dicts(d1, d2):
if len(d1) != len(d2):
return False
if set(d1.keys()) != set(d2.keys()):
return False
for key in d1.keys():
if type(d1[key]) != type(d2[key]):
return False
elif isinstance(d1[key], dict):
outcome = compare_dicts(d1[key], d2[key])
if not outcome:
return False
elif isinstance(d1[key], np.ndarray):
if not all(d1[key] == d2[key]):
return False
else:
if d1[key] != d2[key]:
return False
return True
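# Illustrative usage sketch (not part of the original test-suite): how compare_dicts
# handles nested dicts and numpy arrays. The helper name below is hypothetical.
def _example_compare_dicts():
    a = {"x": np.array([1, 2, 3]), "meta": {"kind": "numeric"}}
    b = {"x": np.array([1, 2, 3]), "meta": {"kind": "numeric"}}
    c = {"x": np.array([1, 2, 4]), "meta": {"kind": "numeric"}}
    assert compare_dicts(a, b) is True   # equal values, including array contents
    assert compare_dicts(a, c) is False  # array mismatch is detected element-wise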
def test_empty_formula(data):
with pytest.raises(ValueError):
design_matrices("", data)
def test_empty_model(data):
dm = design_matrices("y ~ 0", data)
assert dm.common == None
assert dm.group == None
def test_common_intercept_only_model(data):
dm = design_matrices("y ~ 1", data)
assert len(dm.common.terms) == 1
assert dm.common.terms["Intercept"].kind == "intercept"
assert dm.common.terms["Intercept"].labels == ["Intercept"]
assert all(dm.common.design_matrix == 1)
assert dm.group == None
def test_group_specific_intercept_only(data):
dm = design_matrices("y ~ 0 + (1|g)", data)
assert len(dm.group.terms) == 1
assert dm.group.terms["1|g"].kind == "intercept"
assert dm.group.terms["1|g"].groups == ["A", "B"]
assert dm.group.terms["1|g"].labels == ["1|g[A]", "1|g[B]"]
assert dm.common == None
def test_common_predictor(data):
dm = design_matrices("y ~ x1", data)
assert list(dm.common.terms) == ["Intercept", "x1"]
assert dm.common.terms["x1"].kind == "numeric"
assert dm.common.terms["x1"].labels == ["x1"]
assert dm.common.terms["x1"].levels is None
    # 'f' does not span intercept because the intercept is already included
dm = design_matrices("y ~ f", data)
assert list(dm.common.terms) == ["Intercept", "f"]
assert dm.common.terms["f"].kind == "categoric"
assert dm.common.terms["f"].labels == [f"f[{l}]" for l in sorted(data["f"].unique())[1:]]
assert dm.common.terms["f"].levels == sorted(list(data["f"].unique()))[1:]
assert dm.common.terms["f"].spans_intercept == False
def test_categoric_encoding(data):
# No intercept, one categoric predictor
dm = design_matrices("y ~ 0 + f", data)
assert list(dm.common.terms) == ["f"]
assert dm.common.terms["f"].kind == "categoric"
assert dm.common.terms["f"].labels == [f"f[{l}]" for l in sorted(data["f"].unique())]
assert dm.common.terms["f"].levels == sorted(list(data["f"].unique()))
assert dm.common.terms["f"].spans_intercept is True
assert dm.common.design_matrix.shape == (20, 2)
# Intercept, one categoric predictor
dm = design_matrices("y ~ 1 + f", data)
assert list(dm.common.terms) == ["Intercept", "f"]
assert dm.common.terms["f"].kind == "categoric"
assert dm.common.terms["f"].labels == [f"f[{l}]" for l in sorted(data["f"].unique())[1:]]
assert dm.common.terms["f"].levels == sorted(list(data["f"].unique()))[1:]
assert dm.common.terms["f"].spans_intercept is False
assert dm.common.design_matrix.shape == (20, 2)
# No intercept, two additive categoric predictors
dm = design_matrices("y ~ 0 + f + g", data)
assert list(dm.common.terms) == ["f", "g"]
assert dm.common.terms["f"].kind == "categoric"
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["f"].labels == [f"f[{l}]" for l in sorted(data["f"].unique())]
assert dm.common.terms["g"].labels == [f"g[{l}]" for l in sorted(data["g"].unique())[1:]]
assert dm.common.terms["f"].levels == sorted(list(data["f"].unique()))
assert dm.common.terms["g"].levels == sorted(list(data["g"].unique()))[1:]
assert dm.common.terms["f"].spans_intercept is True
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.design_matrix.shape == (20, 3)
# Intercept, two additive categoric predictors
dm = design_matrices("y ~ 1 + f + g", data)
assert list(dm.common.terms) == ["Intercept", "f", "g"]
assert dm.common.terms["f"].kind == "categoric"
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["f"].labels == [f"f[{l}]" for l in sorted(data["f"].unique())[1:]]
assert dm.common.terms["g"].labels == [f"g[{l}]" for l in sorted(data["g"].unique())[1:]]
assert dm.common.terms["f"].levels == sorted(list(data["f"].unique()))[1:]
assert dm.common.terms["g"].levels == sorted(list(data["g"].unique()))[1:]
assert dm.common.terms["f"].spans_intercept is False
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.design_matrix.shape == (20, 3)
# No intercept, two categoric predictors with interaction
dm = design_matrices("y ~ 0 + f + g + f:g", data)
assert list(dm.common.terms) == ["f", "g", "f:g"]
assert dm.common.terms["f"].kind == "categoric"
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["f:g"].kind == "interaction"
assert dm.common.terms["f"].labels == [f"f[{l}]" for l in sorted(data["f"].unique())]
assert dm.common.terms["g"].labels == [f"g[{l}]" for l in sorted(data["g"].unique())[1:]]
assert dm.common.terms["f:g"].labels == ["f[B]:g[B]"]
assert dm.common.terms["f"].levels == sorted(list(data["f"].unique()))
assert dm.common.terms["g"].levels == sorted(list(data["g"].unique()))[1:]
assert dm.common.terms["f:g"].levels == ["B, B"]
assert dm.common.terms["f"].spans_intercept is True
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["f:g"].spans_intercept is False
assert dm.common.terms["f:g"].components[0].spans_intercept is False
assert dm.common.terms["f:g"].components[1].spans_intercept is False
assert dm.common.design_matrix.shape == (20, 4)
# Intercept, two categoric predictors with interaction
dm = design_matrices("y ~ 1 + f + g + f:g", data)
assert list(dm.common.terms) == ["Intercept", "f", "g", "f:g"]
assert dm.common.terms["f"].kind == "categoric"
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["f:g"].kind == "interaction"
assert dm.common.terms["f"].labels == [f"f[{l}]" for l in sorted(data["f"].unique())[1:]]
assert dm.common.terms["g"].labels == [f"g[{l}]" for l in sorted(data["g"].unique())[1:]]
assert dm.common.terms["f:g"].labels == ["f[B]:g[B]"]
assert dm.common.terms["f"].levels == sorted(list(data["f"].unique()))[1:]
assert dm.common.terms["g"].levels == sorted(list(data["g"].unique()))[1:]
assert dm.common.terms["f"].spans_intercept is False
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["f:g"].components[0].spans_intercept is False
assert dm.common.terms["f:g"].components[1].spans_intercept is False
assert dm.common.design_matrix.shape == (20, 4)
# No intercept, interaction between two categorics
dm = design_matrices("y ~ 0 + f:g", data)
assert list(dm.common.terms) == ["f:g"]
assert dm.common.terms["f:g"].kind == "interaction"
assert dm.common.terms["f:g"].labels == ["f[A]:g[A]", "f[A]:g[B]", "f[B]:g[A]", "f[B]:g[B]"]
assert dm.common.terms["f:g"].spans_intercept is True
assert dm.common.terms["f:g"].components[0].spans_intercept is True
assert dm.common.terms["f:g"].components[1].spans_intercept is True
assert dm.common.design_matrix.shape == (20, 4)
# Intercept, interaction between two categorics
# It adds "g" -> It uses Patsy algorithm..
dm = design_matrices("y ~ 1 + f:g", data)
assert list(dm.common.terms) == ["Intercept", "g", "f:g"]
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["f:g"].kind == "interaction"
assert dm.common.terms["f:g"].labels == ["f[B]:g[A]", "f[B]:g[B]"]
assert dm.common.terms["f:g"].spans_intercept is False
assert dm.common.terms["f:g"].components[0].spans_intercept is False
assert dm.common.terms["f:g"].components[1].spans_intercept is True
assert dm.common.design_matrix.shape == (20, 4)
    # Same as before
dm = design_matrices("y ~ 1 + g + f:g", data)
assert list(dm.common.terms) == ["Intercept", "g", "f:g"]
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["f:g"].kind == "interaction"
assert dm.common.terms["f:g"].labels == ["f[B]:g[A]", "f[B]:g[B]"]
assert dm.common.terms["f:g"].spans_intercept is False
assert dm.common.terms["f:g"].components[0].spans_intercept is False
assert dm.common.terms["f:g"].components[1].spans_intercept is True
assert dm.common.design_matrix.shape == (20, 4)
def test_categoric_encoding_with_numeric_interaction(data):
dm = design_matrices("y ~ x1 + x2 + f:g + h:j:x2", data)
assert list(dm.common.terms) == ["Intercept", "x1", "x2", "g", "f:g", "j:x2", "h:j:x2"]
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["f:g"].kind == "interaction"
assert dm.common.terms["f:g"].labels == ["f[B]:g[A]", "f[B]:g[B]"]
assert dm.common.terms["f:g"].components[0].spans_intercept is False
assert dm.common.terms["f:g"].components[1].spans_intercept is True
assert dm.common.terms["j:x2"].spans_intercept is False
assert dm.common.terms["h:j:x2"].components[0].spans_intercept is False
assert dm.common.terms["h:j:x2"].components[1].spans_intercept is True
assert dm.common.terms["h:j:x2"].components[2].kind == "numeric"
def test_interactions(data):
# These two models are the same
dm = design_matrices("y ~ f * g", data)
dm2 = design_matrices("y ~ f + g + f:g", data)
assert dm2.common.terms == dm.common.terms
# When no intercept too
dm = design_matrices("y ~ 0 + f * g", data)
dm2 = design_matrices("y ~ 0 + f + g + f:g", data)
assert dm2.common.terms == dm.common.terms
# Mix of numeric/categoric
# "g" in "g" -> does not span intercept
# "g" in "x1:g" -> does not span intercept because x1 is present in formula
dm = design_matrices("y ~ x1 + g + x1:g", data)
assert list(dm.common.terms) == ["Intercept", "x1", "g", "x1:g"]
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["x1:g"].components[1].spans_intercept is False
# "g" in "g" -> reduced
# "g" in "x1:g" -> full because x1 is not present in formula
dm = design_matrices("y ~ g + x1:g", data)
assert list(dm.common.terms) == ["Intercept", "g", "x1:g"]
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["x1:g"].components[1].spans_intercept is True
# "g" in "x1:x2:g" is full, because x1:x2 is a new group and we don't have x1:x2 in the model
dm = design_matrices("y ~ x1 + g + x1:g + x1:x2:g", data)
assert list(dm.common.terms) == ["Intercept", "x1", "g", "x1:g", "x1:x2:g"]
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["x1:g"].components[1].spans_intercept is False
assert dm.common.terms["x1:x2:g"].components[2].spans_intercept is True
# "g" in "x1:x2:g" is reduced, because x1:x2 is a new group and we have x1:x2 in the model
dm = design_matrices("y ~ x1 + g + x1:x2 + x1:g + x1:x2:g", data)
assert list(dm.common.terms) == ["Intercept", "x1", "g", "x1:x2", "x1:g", "x1:x2:g"]
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["g"].spans_intercept is False
assert dm.common.terms["x1:g"].components[1].spans_intercept is False
assert dm.common.terms["x1:x2:g"].components[2].spans_intercept is False
    # And now, since we don't have the intercept, x1, or x1:x2, all the "g" components are full
dm = design_matrices("y ~ 0 + g + x1:g + x1:x2:g", data)
assert list(dm.common.terms) == ["g", "x1:g", "x1:x2:g"]
assert dm.common.terms["g"].kind == "categoric"
assert dm.common.terms["g"].spans_intercept is True
assert dm.common.terms["x1:g"].components[1].spans_intercept is True
assert dm.common.terms["x1:x2:g"].components[2].spans_intercept is True
# Two numerics
dm = design_matrices("y ~ x1:x2", data)
assert "x1:x2" in dm.common.terms
assert np.allclose(dm.common["x1:x2"][:, 0], data["x1"] * data["x2"])
def test_built_in_transforms(data):
# {...} gets translated to I(...)
dm = design_matrices("y ~ {x1 + x2}", data)
assert list(dm.common.terms) == ["Intercept", "I(x1 + x2)"]
assert dm.common.terms["I(x1 + x2)"].kind == "numeric"
assert np.allclose(dm.common["I(x1 + x2)"], (data["x1"] + data["x2"]).values[:, None])
dm2 = design_matrices("y ~ I(x1 + x2)", data)
assert compare_dicts(dm.common.terms, dm2.common.terms)
# center()
dm = design_matrices("y ~ center(x1)", data)
assert list(dm.common.terms) == ["Intercept", "center(x1)"]
assert dm.common.terms["center(x1)"].kind == "numeric"
assert np.allclose(dm.common["center(x1)"].mean(), 0)
# scale()
dm = design_matrices("y ~ scale(x1)", data)
assert list(dm.common.terms) == ["Intercept", "scale(x1)"]
assert dm.common.terms["scale(x1)"].kind == "numeric"
assert np.allclose(dm.common["scale(x1)"].mean(), 0)
assert np.allclose(dm.common["scale(x1)"].std(), 1)
# standardize(), alias of scale()
dm = design_matrices("y ~ standardize(x1)", data)
assert list(dm.common.terms) == ["Intercept", "standardize(x1)"]
assert dm.common.terms["standardize(x1)"].kind == "numeric"
assert np.allclose(dm.common["standardize(x1)"].mean(), 0)
assert np.allclose(dm.common["standardize(x1)"].std(), 1)
# C()
# Intercept, no extra arguments, reference is first value observed
dm = design_matrices("y ~ C(x3)", data)
assert list(dm.common.terms) == ["Intercept", "C(x3)"]
assert dm.common.terms["C(x3)"].kind == "categoric"
assert dm.common.terms["C(x3)"].spans_intercept is False
assert dm.common.terms["C(x3)"].levels == ["2", "3", "4"]
assert dm.common.terms["C(x3)"].labels == ["C(x3)[2]", "C(x3)[3]", "C(x3)[4]"]
# No intercept, no extra arguments
dm = design_matrices("y ~ 0 + C(x3)", data)
assert list(dm.common.terms) == ["C(x3)"]
assert dm.common.terms["C(x3)"].kind == "categoric"
assert dm.common.terms["C(x3)"].spans_intercept is True
assert dm.common.terms["C(x3)"].levels == ["1", "2", "3", "4"]
assert dm.common.terms["C(x3)"].labels == ["C(x3)[1]", "C(x3)[2]", "C(x3)[3]", "C(x3)[4]"]
# Specify levels, different to observed
lvls = [3, 2, 4, 1] # noqa
dm = design_matrices("y ~ C(x3, levels=lvls)", data)
assert dm.common.terms["C(x3, levels = lvls)"].kind == "categoric"
assert dm.common.terms["C(x3, levels = lvls)"].levels == ["2", "4", "1"]
# Pass a reference not in the data
with pytest.raises(ValueError):
dm = design_matrices("y ~ C(x3, 5)", data)
# Pass categoric, remains unchanged
dm = design_matrices("y ~ C(f)", data)
dm2 = design_matrices("y ~ f", data)
d1 = dm.common.terms["C(f)"]
d2 = dm2.common.terms["f"]
assert d1.kind == d2.kind
assert d1.levels == d2.levels
assert d1.spans_intercept == d2.spans_intercept
assert not d1.labels == d2.labels # because one is 'C(f)' and other is 'f'
assert all(dm.common["C(f)"] == dm2.common["f"])
def test_external_transforms(data):
dm = design_matrices("y ~ np.exp(x1)", data)
assert np.allclose(dm.common["np.exp(x1)"][:, 0], np.exp(data["x1"]))
def add_ten(x):
return x + 10
dm = design_matrices("y ~ add_ten(x1)", data)
assert np.allclose(dm.common["add_ten(x1)"][:, 0], data["x1"] + 10)
def test_non_syntactic_names():
data = pd.DataFrame(
{
"My response": np.random.normal(size=10),
"$$#1@@": np.random.normal(size=10),
"-- ! Hi there!": np.random.normal(size=10),
}
)
dm = design_matrices("`My response` ~ `$$#1@@`*`-- ! Hi there!`", data)
assert list(dm.common.terms.keys()) == [
"Intercept",
"$$#1@@",
"-- ! Hi there!",
"$$#1@@:-- ! Hi there!",
]
assert np.allclose(dm.common["$$#1@@"][:, 0], data["$$#1@@"])
assert np.allclose(dm.common["-- ! Hi there!"][:, 0], data["-- ! Hi there!"])
assert np.allclose(dm.common["-- ! Hi there!"][:, 0], data["-- ! Hi there!"])
assert np.allclose(
dm.common["$$#1@@:-- ! Hi there!"][:, 0], data["$$#1@@"] * data["-- ! Hi there!"]
)
def test_categoric_group_specific():
data = pd.DataFrame(
{
"BP": np.random.normal(size=30),
"BMI": np.random.normal(size=30),
"age_grp": np.random.choice([0, 1, 2], size=30),
}
)
dm = design_matrices("BP ~ 0 + (C(age_grp)|BMI)", data)
    assert list(dm.group.terms.keys()) == ["1|BMI", "C(age_grp)[1]|BMI", "C(age_grp)[2]|BMI"]
dm = design_matrices("BP ~ 0 + (0 + C(age_grp)|BMI)", data)
    assert list(dm.group.terms) == ["C(age_grp)[0]|BMI", "C(age_grp)[1]|BMI", "C(age_grp)[2]|BMI"]
def test_interactions_in_group_specific(pixel):
# We have group specific terms with the following characteristics
# 1. expr=categoric, factor=categoric
# 2. expr=intercept, factor=categoric
# 3. expr=intercept, factor=interaction between categorics
    # The design matrices used for the comparison are loaded from text files.
# The encoding is implicitly checked when comparing names.
from os.path import dirname, join
data_dir = join(dirname(__file__), "data/group_specific")
slope_by_dog_original = np.loadtxt(join(data_dir, "slope_by_dog.txt"))
intercept_by_side_original = np.loadtxt(join(data_dir, "intercept_by_side.txt"))
intercept_by_side_dog_original = np.loadtxt(join(data_dir, "intercept_by_side_dog.txt"))
dog_and_side_by_day_original = np.loadtxt(join(data_dir, "dog_and_side_by_day.txt"))
dm = design_matrices("pixel ~ day + (0 + day | Dog) + (1 | Side/Dog)", pixel)
slope_by_dog = dm.group["day|Dog"]
intercept_by_side = dm.group["1|Side"]
intercept_by_side_dog = dm.group["1|Side:Dog"]
# Assert values in the design matrix
assert (slope_by_dog == slope_by_dog_original).all()
assert (intercept_by_side == intercept_by_side_original).all()
assert (intercept_by_side_dog == intercept_by_side_dog_original).all()
# Assert labels
names = [f"day[{d}]|Dog[{g}]" for g in [1, 2, 3] for d in [2, 4, 6]]
assert dm.group.terms["day|Dog"].labels == names
names = [f"1|Side[{s}]" for s in ["L", "R"]]
assert dm.group.terms["1|Side"].labels == names
names = [f"1|Side[{s}]:Dog[{d}]" for s in ["L", "R"] for d in [1, 2, 3]]
assert dm.group.terms["1|Side:Dog"].labels == names
# Another design matrix
dm = design_matrices("(0 + Dog:Side | day)", pixel)
dog_and_side_by_day = dm.group["Dog:Side|day"]
# Assert values in the design matrix
assert (dog_and_side_by_day == dog_and_side_by_day_original).all()
# Assert labels
names = [
f"Dog[{d}]:Side[{s}]|day[{g}]" for g in [2, 4, 6] for d in [1, 2, 3] for s in ["L", "R"]
]
assert dm.group.terms["Dog:Side|day"].labels == names
def test_prop_response(beetle):
response = design_matrices("prop(y, n) ~ x", beetle).response
assert response.kind == "proportion"
assert response.design_vector.shape == (8, 2)
assert (np.less_equal(response.design_vector[:, 0], response.design_vector[:, 1])).all()
# Admit integer values for 'n'
response = design_matrices("prop(y, 62) ~ x", beetle).response
assert response.kind == "proportion"
assert response.design_vector.shape == (8, 2)
assert (np.less_equal(response.design_vector[:, 0], response.design_vector[:, 1])).all()
# Use aliases
response = design_matrices("proportion(y, n) ~ x", beetle).response
assert response.kind == "proportion"
assert response.design_vector.shape == (8, 2)
assert (np.less_equal(response.design_vector[:, 0], response.design_vector[:, 1])).all()
# Use aliases
response = design_matrices("p(y, n) ~ x", beetle).response
assert response.kind == "proportion"
assert response.design_vector.shape == (8, 2)
assert (np.less_equal(response.design_vector[:, 0], response.design_vector[:, 1])).all()
def test_prop_response_fails():
# x larger than n
with pytest.raises(ValueError):
design_matrices("prop(x, n) ~ 1", pd.DataFrame({"x": [2, 3], "n": [1, 2]}))
# x and/or n not integer
with pytest.raises(ValueError):
design_matrices("prop(x, n) ~ 1", pd.DataFrame({"x": [2, 3.3], "n": [4, 4]}))
with pytest.raises(ValueError):
design_matrices("prop(x, n) ~ 1", pd.DataFrame({"x": [2, 3], "n": [4.3, 4]}))
# x not a variable name
with pytest.raises(ValueError):
design_matrices("prop(10, n) ~ 1", pd.DataFrame({"x": [2, 3], "n": [1, 2]}))
# trials must be integer, not float
with pytest.raises(ValueError):
design_matrices("prop(x, 3.4) ~ 1", pd.DataFrame({"x": [2, 3], "n": [1, 2]}))
def test_categoric_responses():
data = pd.DataFrame(
{
"y1": np.random.choice(["A", "B", "C"], size=30),
"y2": np.random.choice(["A", "B"], size=30),
"y3": np.random.choice(["Hi there", "Bye bye", "What??"], size=30),
"x": np.random.normal(size=30),
}
)
# Multi-level response. Response is a design matrix of dummies that span the intercept.
response = design_matrices("y1 ~ x", data).response
assert list(np.unique(response.design_vector)) == [0, 1]
assert response.levels == ["A", "B", "C"]
assert response.binary is False
assert response.success is None
# Multi-level response, explicitly converted to binary
response = design_matrices("y1['A'] ~ x", data).response
assert list(np.unique(response.design_vector)) == [0, 1]
assert response.levels is None # when binary, levels is None by design
assert response.binary is True
assert response.success == "A"
    # Response has two levels, but it is not flagged as binary because it was not explicitly converted
    # TODO: Revisit whether this logic is fine
response = design_matrices("y2 ~ x", data).response
assert list(np.unique(response.design_vector)) == [0, 1]
assert response.levels == ["A", "B"]
assert response.binary is False
assert response.success is None
# Binary response with explicit level
response = design_matrices("y2['B'] ~ x", data).response
assert list(np.unique(response.design_vector)) == [0, 1]
assert response.levels is None
assert response.binary is True
assert response.success == "B"
# Binary response with explicit level passed as identifier
response = design_matrices("y2[B] ~ x", data).response
assert list(np.unique(response.design_vector)) == [0, 1]
assert response.levels is None
assert response.binary is True
assert response.success == "B"
# Binary response with explicit level with spaces
response = design_matrices("y3['Bye bye'] ~ x", data).response
assert list(np.unique(response.design_vector)) == [0, 1]
assert response.levels is None
assert response.binary is True
assert response.success == "Bye bye"
# Users trying to use nested brackets (WHY?)
with pytest.raises(ParseError, match=re.escape("Are you using nested brackets? Why?")):
design_matrices("y3[A[B]] ~ x", data)
    # Users trying to pass a number to use as the level
with pytest.raises(
ParseError, match=re.escape("Subset notation only allows a string or an identifer")
):
design_matrices("y3[1] ~ x", data)
def test_binary_function():
size = 100
data = pd.DataFrame(
{
"y": np.random.randint(0, 5, size=size),
"x": np.random.randint(5, 10, size=size),
"g": np.random.choice(["a", "b", "c"], size=size),
}
)
# String value
term = design_matrices("y ~ binary(g, 'c')", data).common["binary(g, c)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["g"] == "c"))
# Numeric value
term = design_matrices("y ~ binary(x, 7)", data).common["binary(x, 7)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["x"] == 7))
# Variable name
# string
m = "b"
term = design_matrices("y ~ binary(g, m)", data).common["binary(g, m)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["g"] == m))
# numeric
z = 8
term = design_matrices("y ~ binary(x, z)", data).common["binary(x, z)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["x"] == z))
# Pass nothing
term = design_matrices("y ~ binary(x)", data).common["binary(x)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["x"] == 5))
# Values not found in the variable
with pytest.raises(ValueError):
design_matrices("y ~ binary(g, 'Not found')", data)
with pytest.raises(ValueError):
design_matrices("y ~ binary(x, 999)", data)
def test_B_function():
size = 100
data = pd.DataFrame(
{
"y": np.random.randint(0, 5, size=size),
"x": np.random.randint(5, 10, size=size),
"g": np.random.choice(["a", "b", "c"], size=size),
}
)
# String value
term = design_matrices("y ~ B(g, 'c')", data).common["B(g, c)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["g"] == "c"))
# Numeric value
term = design_matrices("y ~ B(x, 7)", data).common["B(x, 7)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["x"] == 7))
# Variable name
# string
m = "b"
term = design_matrices("y ~ B(g, m)", data).common["B(g, m)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["g"] == m))
# numeric
z = 8
term = design_matrices("y ~ B(x, z)", data).common["B(x, z)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["x"] == z))
# Pass nothing
term = design_matrices("y ~ B(x)", data).common["B(x)"].squeeze()
assert np.array_equal(np.where(term == 1), np.where(data["x"] == 5))
# Values not found in the variable
with pytest.raises(ValueError):
design_matrices("y ~ B(g, 'Not found')", data)
with pytest.raises(ValueError):
design_matrices("y ~ B(x, 999)", data)
def test_C_function():
size = 100
rng = np.random.default_rng(1234)
data = pd.DataFrame(
{
"x": rng.integers(5, 10, size=size),
"g": rng.choice(["a", "b", "c"], size=size),
}
)
term = design_matrices("C(x)", data).common.terms["C(x)"]
assert term.kind == "categoric"
assert term.levels == ["6", "7", "8", "9"]
assert term.data.shape == (100, 4)
levels = [6, 8, 5, 7, 9]
term = design_matrices("C(x, levels=levels)", data).common.terms["C(x, levels = levels)"]
assert term.kind == "categoric"
assert term.levels == [str(level) for level in levels[1:]]
assert term.data.shape == (100, 4)
levels = ["b", "c", "a"]
term = design_matrices("C(g, levels=levels)", data).common.terms["C(g, levels = levels)"]
assert term.kind == "categoric"
assert term.levels == levels[1:]
assert term.data.shape == (100, 2)
# Treatment encoding, the default
t1 = design_matrices("C(x, Treatment)", data).common.terms["C(x, Treatment)"]
t2 = design_matrices("C(x, Treatment())", data).common.terms["C(x, Treatment())"]
t3 = design_matrices("C(x, Treatment(5))", data).common.terms["C(x, Treatment(5))"]
t4 = design_matrices("C(x)", data).common.terms["C(x)"]
assert np.array_equal(t1.data, t2.data)
assert np.array_equal(t1.data, t3.data)
assert np.array_equal(t1.data, t4.data)
t1 = design_matrices("C(g, Treatment)", data).common.terms["C(g, Treatment)"]
t2 = design_matrices("C(g, Treatment())", data).common.terms["C(g, Treatment())"]
t3 = design_matrices("C(g, Treatment('a'))", data).common.terms["C(g, Treatment(a))"]
t4 = design_matrices("C(g)", data).common.terms["C(g)"]
assert np.array_equal(t1.data, t2.data)
assert np.array_equal(t1.data, t3.data)
assert np.array_equal(t1.data, t4.data)
# Sum encoding, not the default
t1 = design_matrices("C(x, Sum)", data).common.terms["C(x, Sum)"]
t2 = design_matrices("C(x, Sum())", data).common.terms["C(x, Sum())"]
t3 = design_matrices("C(x, Sum(9))", data).common.terms["C(x, Sum(9))"]
assert np.array_equal(t1.data, t2.data)
assert np.array_equal(t1.data, t3.data)
t1 = design_matrices("C(g, Sum)", data).common.terms["C(g, Sum)"]
t2 = design_matrices("C(g, Sum())", data).common.terms["C(g, Sum())"]
t3 = design_matrices("C(g, Sum('c'))", data).common.terms["C(g, Sum(c))"]
assert np.array_equal(t1.data, t2.data)
assert np.array_equal(t1.data, t3.data)
    # NOTE: B(g, 'c') is then looked up as B(g, c).
    # From the second form it is impossible to tell whether c is a literal or a variable name
def test_C_aliases():
size = 100
rng = np.random.default_rng(1234)
data = pd.DataFrame(
{
"x": rng.integers(5, 10, size=size),
"g": rng.choice(["a", "b", "c"], size=size),
}
)
t1 = design_matrices("S(x)", data).common.terms["S(x)"]
t2 = design_matrices("C(x, Sum)", data).common.terms["C(x, Sum)"]
assert np.array_equal(t1.data, t2.data)
t1 = design_matrices("T(x)", data).common.terms["T(x)"]
t2 = design_matrices("C(x, Treatment)", data).common.terms["C(x, Treatment)"]
assert np.array_equal(t1.data, t2.data)
def test_S_function():
size = 100
rng = np.random.default_rng(1234)
data = pd.DataFrame(
{
"x": rng.integers(5, 10, size=size),
"g": rng.choice(["a", "b", "c"], size=size),
}
)
term = design_matrices("0 + S(x)", data).common.terms["S(x)"]
assert term.labels == ["S(x)[mean]"] + [f"S(x)[{l}]" for l in [5, 6, 7, 8]]
term = design_matrices("S(x, 7)", data).common.terms["S(x, 7)"]
assert term.labels == [f"S(x, 7)[{l}]" for l in [5, 6, 8, 9]]
    # It still drops the last level, no matter that we changed the order
levels = [9, 8, 7, 6, 5]
term = design_matrices("S(x, levels = levels)", data).common.terms["S(x, levels = levels)"]
assert term.labels == [f"S(x, levels = levels)[{l}]" for l in levels[:-1]]
term = design_matrices("S(x, 6, levels)", data).common.terms["S(x, 6, levels)"]
assert term.labels == [f"S(x, 6, levels)[{l}]" for l in [9, 8, 7, 5]]
def test_T_function():
size = 100
rng = np.random.default_rng(1234)
data = pd.DataFrame(
{
"x": rng.integers(5, 10, size=size),
"g": rng.choice(["a", "b", "c"], size=size),
}
)
term = design_matrices("0 + T(x)", data).common.terms["T(x)"]
assert term.labels == [f"T(x)[{l}]" for l in [5, 6, 7, 8, 9]]
term = design_matrices("T(x, 7)", data).common.terms["T(x, 7)"]
assert term.labels == [f"T(x, 7)[{l}]" for l in [5, 6, 8, 9]]
    # It still drops the first level, no matter that we changed the order
levels = [9, 8, 7, 6, 5]
term = design_matrices("T(x, levels = levels)", data).common.terms["T(x, levels = levels)"]
assert term.labels == [f"T(x, levels = levels)[{l}]" for l in levels[1:]]
term = design_matrices("T(x, 6, levels)", data).common.terms["T(x, 6, levels)"]
assert term.labels == [f"T(x, 6, levels)[{l}]" for l in [9, 8, 7, 5]]
def test_offset():
size = 100
rng = np.random.default_rng(1234)
data = pd.DataFrame(
{
"y": rng.integers(0, 5, size=size),
"x": rng.integers(5, 10, size=size),
"g": np.random.choice(["a", "b", "c"], size=size),
}
)
dm = design_matrices("y ~ offset(x)", data)
term = dm.common.terms["offset(x)"]
assert term.kind == "offset"
assert term.labels == ["offset(x)"]
assert (dm.common["offset(x)"].flatten() == data["x"].values).all()
with pytest.raises(
ValueError, match=re.escape("offset() can only be used with numeric variables")
):
design_matrices("y ~ offset(g)", data)
with pytest.raises(ValueError, match=re.escape("offset() cannot be used as a response term.")):
design_matrices("offset(y) ~ x", data)
def test_predict_prop(beetle):
# If trials is a variable, new dataset must have that variable
dm = design_matrices("prop(y, n) ~ x", beetle)
result = dm.response.evaluate_new_data(pd.DataFrame({"n": [10, 10, 30, 30]}))
assert (result == np.array([10, 10, 30, 30])).all()
# If trials is a constant value, return that same value
dm = design_matrices("prop(y, 70) ~ x", beetle)
    result = dm.response.evaluate_new_data(pd.DataFrame({"n": [10, 10, 30, 30]}))
import pandas as pd
from thermotar.utils import df_utils
class Replicator:
def __init__(self,objects,indexes = None,index_names = None, get_methods = 'inheritable'):
'''Grabs list of dataframes or objects with .data methods. Concatanates them into one dataframe and sets to this dfs df '''
if not indexes:
indexes = range(len(objects))
self.index_names = index_names
self.replicas = {index:item for index,item in zip(indexes,objects)}
        # assume homogeneous
try:
            # TODO: add some sort of index naming
            df = pd.concat([member.data for member in objects], keys=indexes, ignore_index=False, join='outer', names=index_names)
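# Note (added comment, not in the original module): with keys=indexes and names=index_names,
# pd.concat stacks each replica's rows under an extra outer index level. For example,
# pd.concat([df_a, df_b], keys=[0, 1], names=["replica"]) yields a frame with a
# (replica, original_index) MultiIndex, from which a single replica can be recovered
# via combined.xs(0, level="replica").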
from tensorflow.python.ops.functional_ops import While
import tensorflow as tf
import numpy as np
import pandas as pd
import waktu as wk
import time
from datetime import datetime
from datetime import date
import schedule
import pyrebase
import json
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from tensorflow import keras
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.python.eager.context import num_gpus
from os import read, stat_result
from re import T, X
cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)
dbStore = firestore.client()
def cekHari():
cekNow = date.today().strftime("%A")
if cekNow == 'Monday':
now = 0
elif cekNow == 'Tuesday':
now = 1
elif cekNow == 'Wednesday':
now = 2
elif cekNow == 'Thursday':
now = 3
elif cekNow == 'Friday':
now = 4
elif cekNow == 'Saturday':
now = 5
elif cekNow == 'Sunday':
now = 6
return now
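# Note (not in the original script): datetime.date.weekday() already returns
# Monday=0 ... Sunday=6, the same mapping cekHari() builds by hand, so an equivalent
# one-liner would be (function name below is hypothetical):
def cekHariWeekday():
    return date.today().weekday()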
config = {
"apiKey": "<KEY>",
"authDomain": "cloudta2021-fa4af.firebaseapp.com",
"databaseURL": "https://cloudta2021-fa4af-default-rtdb.firebaseio.com",
"storageBucket": "cloudta2021-fa4af.appspot.com"
}
DataHead = "Dataset Hasil Pengujian"
firebase = pyrebase.initialize_app(config)
db = firebase.database()
timeNow = datetime.now()
jam = timeNow.hour
menit = timeNow.minute
timestamp = timeNow.strftime("%H:%M")
day = date.today().strftime("%A")
idrelay = [0, 1, 2, 3]
hari = cekHari()
waktu = wk.cekWaktu(jam, menit)
# waktu = 120
# hari = 0
data = pd.read_csv('FixDataBind.csv')
data = pd.DataFrame(data, columns=['waktu', 'hari', 'idrelay', 'status'])
data['waktu'] = pd.factorize(data['waktu'])[0]
data['hari'] = pd.factorize(data['hari'])[0]
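# Note (added comment, not in the original script): pd.factorize returns a
# (codes, uniques) pair; taking element [0] keeps only the integer codes, so that
# 'waktu' and 'hari' become numeric columns.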
from mpi4py import MPI
import matplotlib
from tmm import coh_tmm
import pandas as pd
import os
from numpy import pi
from scipy.interpolate import interp1d
from joblib import Parallel, delayed
import numpy as np
import glob
import matplotlib.pyplot as plt
import pickle as pkl
import seaborn as sns
from scipy.optimize import minimize
import json
from tqdm import tqdm
DATABASE = './data'
INSULATORS = ['HfO2', 'SiO2', 'SiC', 'Al2O3', 'MgF2', 'TiO2', 'Fe2O3', 'MgF2', 'Si3N4', 'TiN', 'ZnO', 'ZnS', 'ZnSe']
METALS = ['Ag', 'Al', 'Cr', 'Ge', 'Si', 'Ni']
num_workers = 8
def cal_reward(R, T, A, target):
'''
Calculate reward based on given spectrums.
We calculate the reward using averaged (1-mse).
Args:
R, T, A: numpy array. Reflection, transmission, and
absorption spectrums, respectively.
target: dict. {'R':np.array, 'T':np.array, 'A':np.array}
Returns:
reward: float. Reward for the spectrum.
'''
reward = 0
for k, v in target.items():
if k == 'R':
res = R
elif k == 'T':
res = T
else:
res = A
reward += 1 - np.abs(res.squeeze() - v).mean()
reward /= len(target)
return reward
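# Illustrative usage sketch (not part of the original pipeline): cal_reward with a toy
# 3-point target. All values below are made up for the example; only numpy is assumed.
def _cal_reward_example():
    R = np.array([0.1, 0.2, 0.3])
    T = np.array([0.6, 0.6, 0.6])
    A = 1 - R - T
    target = {'A': np.ones(3)}          # aim for a perfect absorber
    return cal_reward(R, T, A, target)  # = 1 - mean(|A - 1|) = 0.2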
class Memory:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
def batch_spectrum(env, names_list, thickness_list):
def spectrum(args):
'''
Inputs:
1. names: list of lists, each list correspond to the structures
2. thickness: list of lists
'''
names, thickness = args
R, T, A = env.spectrum(names, thickness, 0, False)
return R, T, A
res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args)
for args in
zip(names_list, thickness_list))
res = np.array(res)
Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]
return Rs, Ts, As
def merge_layers(categories, thicknesses):
'''
Merges consecutive layers with the same material types.
'''
thicknesses = thicknesses[1:-1]
c_output = [categories[0]]
t_output = [thicknesses[0]]
for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):
if c == c_output[-1]:
t_output[-1] += d
continue
else:
c_output.append(c)
t_output.append(d)
t_output.insert(0, np.inf)
t_output.insert(len(t_output), np.inf)
return c_output, t_output
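# Illustrative sketch (not in the original file): merge_layers collapses consecutive
# layers of the same material and re-pads the thickness list with np.inf for the two
# semi-infinite ambient layers. The function name below is hypothetical.
def _merge_layers_example():
    categories = [0, 0, 1, 1, 2]
    thicknesses = [np.inf, 10, 20, 5, 5, 30, np.inf]
    # returns ([0, 1, 2], [inf, 30, 10, 30, inf])
    return merge_layers(categories, thicknesses)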
def get_structure(categories, values, materials, ds, continuous=False,
max_value=400):
'''
    Given categories and values, return the structure in the form
(name (str), thickness (nm))
'''
def threshold(value):
'''
'''
names = [materials[item] for item in categories]
if not continuous:
thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
else:
thickness = []
for category, value in zip(categories, values):
name = materials[category]
if name == 'Ag':
thickness.append(
min(max(15, int(value * max_value//2)), max_value))
elif name in METALS:
thickness.append(
min(max(5, int(value * max_value//2)), max_value))
elif name in INSULATORS:
thickness.append(
min(max(1, int(value * max_value//2)), max_value))
else:
raise ValueError('Material not known')
# thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i,
# item in enumerate(values)] + [np.inf]
thickness = [np.inf] + thickness + [np.inf]
return names, thickness
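# Illustrative sketch (not in the original file): discrete mode of get_structure, where
# each value indexes into the list of allowed thicknesses ds. Materials and values below
# are made up for the example.
def _get_structure_example():
    materials = ['SiO2', 'TiO2']
    ds = [10, 20, 30]
    # returns (['SiO2', 'TiO2'], [inf, 30, 10, inf])
    return get_structure([0, 1], [2, 0], materials, ds, continuous=False)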
class DesignTracker():
def __init__(self, epochs, **kwargs):
"""
This class tracks the best designs discovered.
"""
        if epochs == -1:
            self.layer_ls = []
            self.thick_ls = []
            self.max_ret_ls = []
        else:
            self.layer_ls = [0] * epochs
            self.thick_ls = [0] * epochs
            self.max_ret_ls = [0] * epochs
self.kwargs = kwargs
self.current_e = 0
def store(self, layers, thicknesses, ret, e, append_mode=False):
if append_mode:
self.layer_ls.append(layers)
self.thick_ls.append(thicknesses)
self.max_ret_ls.append(ret)
else:
if ret >= self.max_ret_ls[e]:
self.layer_ls[e] = layers
self.thick_ls[e] = thicknesses
self.max_ret_ls[e] = ret
def save_state(self):
# save buffer from all processes
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))
pkl.dump(self, open(filename, 'wb'))
def print_progress(self):
progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
read_progress = []
for i in range(len(progress)):
if progress[i] == (0,0,0):
break
read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])
return read_progress
def print_progress(progress):
for i in range(len(progress)):
print(progress[i], 0)
progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]]
return progress
class TMM_sim():
def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):
'''
This class returns the spectrum given the designed structures.
'''
self.mats = mats
# include substrate
self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats
self.wavelength = wavelength
self.nk_dict = self.load_materials()
self.substrate = substrate
self.substrate_thick = substrate_thick
def load_materials(self):
'''
Load material nk and return corresponding interpolators.
Return:
nk_dict: dict, key -- material name, value: n, k in the
self.wavelength range
'''
nk_dict = {}
for mat in self.all_mats:
nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
nk.dropna(inplace=True)
wl = nk['wl'].to_numpy()
index = (nk['n'] + nk['k'] * 1.j).to_numpy()
mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
mat_nk_fn = interp1d(
mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
nk_dict[mat] = mat_nk_fn(self.wavelength)
return nk_dict
def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
'''
Input:
materials: list
thickness: list
theta: degree, the incidence angle
Return:
            R, T, A: arrays; reflection, transmission, and absorption spectra
'''
degree = pi/180
if self.substrate != 'Air':
thickness.insert(-1, self.substrate_thick) # substrate thickness
R, T, A = [], [], []
for i, lambda_vac in enumerate(self.wavelength * 1e3):
# we assume the last layer is glass
if self.substrate == 'Glass':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
elif self.substrate == 'Air':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
else:
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]
# n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]
            # import pdb; pdb.set_trace()
res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
R.append(res['R'])
T.append(res['T'])
R, T = np.array(R), np.array(T)
A = 1 - R - T
if plot:
self.plot_spectrum(R, T, A)
if title:
thick = thickness[1:-1]
title = ' | '.join(['{}nm {}'.format(d, m)
for d, m in zip(thick, materials)])
                if self.substrate != 'Air':
title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
else:
title = 'Air | ' + title + ' | Air'
plt.title(title, **{'size': '10'})
return R, T, A
def plot_spectrum(self, R, T, A):
plt.plot(self.wavelength * 1000, R, self.wavelength *
1000, T, self.wavelength * 1000, A, linewidth=3)
plt.ylabel('R/T/A')
plt.xlabel('Wavelength (nm)')
plt.legend(['R: Average = {:.2f}%'.
format(np.mean(R)*100),
'T: Average = {:.2f}%'.
format(np.mean(T)*100),
'A: Average = {:.2f}%'.
format(np.mean(A)*100)])
plt.grid('on', linestyle='--')
plt.ylim([0, 1])
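# Illustrative usage sketch (not part of the original module): simulating a hypothetical
# two-layer stack on glass. It assumes SiO2.csv and TiO2.csv exist in DATABASE and cover
# the default wavelength grid; the structure and thicknesses are made up for the example.
def _tmm_sim_example():
    env = TMM_sim(mats=['SiO2', 'TiO2'], substrate='Glass')
    R, T, A = env.spectrum(['SiO2', 'TiO2'], [np.inf, 100, 50, np.inf])
    return R.mean(), T.mean(), A.mean()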
# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
df = pd.read_csv(file, sep="\t")
width = 0.5
# x = 'Time'
if ax is None:
fig, ax = plt.subplots(2,1)
sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
# ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])
sns.lineplot(x=x, y='AverageEpRet', data=df,
ax=ax[1], color=color, alpha=alpha)
plt.fill_between(df[x],
df['AverageEpRet']-width/2*df['StdEpRet'],
df['AverageEpRet']+width/2*df['StdEpRet'],
alpha=0.3, color=color)
return df
def combine_tracker(folder):
'''
Merge all buffers
'''
trackers = []
if 'design_tracker_merged.pkl' in os.listdir(folder):
tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
combined_tracker = pkl.load(open(tracker_file, 'rb'))
return combined_tracker
for file in os.listdir(folder):
if file.startswith('design_tracker_'):
tracker_file = os.path.join(folder, file)
trackers.append(pkl.load(open(tracker_file, 'rb')))
combined_tracker = DesignTracker(len(trackers[0].layer_ls))
max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in trackers]), axis=0)
for e in range(len(trackers[0].layer_ls)):
combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]
combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]
combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]
if combined_tracker.layer_ls[-1] != 0:
tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file), 'wb'))
return combined_tracker
def summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):
root = '../spinningup/data/'
progress_ls = []
max_ret_ls = []
params = {'size':14}
matplotlib.rc('font', **params)
fig, ax = plt.subplots(2,1, figsize=(10,8))
for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):
folder = os.path.join(root, exp, exp+'_s{}'.format(seed))
progress_file = os.path.join(folder, 'progress.txt')
df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)
tracker = combine_tracker(folder)
progress = tracker.print_progress()
print('{}, Best discovered so far {}'.format(exp, progress[np.argmax(tracker.max_ret_ls)]))
progress_ls.append(progress)
max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))
ax[0].legend(max_ret_ls)
ax[1].legend(exp_ls)
plt.show()
return progress_ls
def load_exp_res(folder):
subfolders = [item for item in glob.glob(folder+'/*')]
def read_hyper(file_name, rep=10):
with open(os.path.join(file_name, 'config.json')) as f:
hypers = json.load(f)
hypers_dict = {}
for k, v in hypers.items():
if k.startswith('logger'):
continue
elif isinstance(v, dict):
for kk, vv in v.items():
if isinstance(vv, list):
hypers_dict[str(k)+'_'+str(kk)] = [vv[0]]*rep
else:
hypers_dict[str(k)+'_'+str(kk)] = [vv]*rep
else:
hypers_dict[k] = [v] * rep
hyper_df = pd.DataFrame(hypers_dict)
return hyper_df
first=True # first pandas file to load
for subfolder in tqdm(subfolders):
runs = glob.glob(subfolder+'/*')
num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),sep='\t'))
for run in runs:
tracker = combine_tracker(run)
progress = tracker.print_progress()
best_design = progress[np.argmax(tracker.max_ret_ls)]
if first:
df = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\t')
hyper_df = read_hyper(run, rep=len(df))
best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df))
df = pd.concat([df, hyper_df, best_designs_df], axis=1)
first = False
else:
df_ = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\t')
hyper_df = read_hyper(run, rep=len(df_))
best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df_))
df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)
                df = pd.concat([df, df_], axis=0)
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2019/11/26 16:33
contact: <EMAIL>
desc: Hebei Province Air Quality Forecast Information Release System
http://172.16.58.3/publish/
Air quality index (API) levels:
1. API 0-50: Level I, "Excellent". Air quality is satisfactory and there is essentially no air pollution; all groups of people can carry out normal activities.
2. API 51-100: Level II, "Good". Air quality is acceptable, but some pollutants may have a weak effect on the health of a very small number of unusually sensitive people, who are advised to reduce outdoor activity.
3. API 101-150: Level III, "Lightly polluted". Symptoms of susceptible groups are mildly aggravated and healthy people may experience irritation; children, the elderly, and patients with heart or respiratory diseases should reduce prolonged, high-intensity outdoor exercise.
4. API 151-200: Level IV, "Moderately polluted". Symptoms of susceptible groups are further aggravated and the heart and respiratory systems of healthy people may be affected; patients should avoid prolonged, high-intensity outdoor exercise and the general population should moderately reduce outdoor activity.
5. API 201-300: Level V, "Heavily polluted". Symptoms of heart and lung disease patients are significantly aggravated and exercise tolerance decreases; symptoms appear widely among healthy people. Children, the elderly, and heart or lung disease patients should stay indoors and stop outdoor exercise; the general population should reduce outdoor activity.
6. API above 300: Level VI, "Severely polluted". Exercise tolerance of healthy people decreases with obvious, strong symptoms, and certain illnesses may appear earlier; children, the elderly, and the sick should stay indoors and avoid physical exertion, and the general population should avoid outdoor activity.
Published by: Hebei Provincial Environmental Emergency and Heavy Pollution Weather Warning Center. Technical support: Institute of Atmospheric Physics, Chinese Academy of Sciences; 3Clear Technology Co., Ltd.
"""
import datetime
import pandas as pd
import requests
def air_hebei(city="唐山市"):
"""
    Hebei Province Air Quality Forecast Information Release System - air quality forecast for the next 6 days
http://172.16.58.3/publish/
:param city: ['石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', '保定市', '张家口市', '承德市', '沧州市', '廊坊市', '衡水市', '辛集市', '定州市']
:type city: str
:return: city = "", 返回所有地区的数据; city = "唐山市", 返回唐山市的数据
:rtype: pandas.DataFrame
"""
url = f"http://172.16.58.3/publishNewServer/api/CityPublishInfo/GetProvinceAndCityPublishData?publishDate={datetime.datetime.today().strftime('%Y-%m-%d')}%2016:00:00"
res = requests.get(url)
json_data = res.json()
city_list = pd.DataFrame.from_dict(json_data["cityPublishDatas"], orient="columns")["CityName"].tolist()
    # Forecast for the first future day; the following days are handled analogously
    future_df_1 = pd.DataFrame.from_dict([item["Date1"] for item in json_data["cityPublishDatas"]], orient="columns")
import os
import pickle
import re
from collections import Counter
import networkx as nx
import pandas as pd
from quantlaw.utils.files import ensure_exists, list_dir
def filename_for_mapping(mapping):
return f'{mapping["snapshot"]}_{mapping["pp_merge"]}.pickle'
def cd_cluster_evolution_mappings_prepare(
overwrite, cluster_mapping_configs, source_folder, target_folder, snapshots
):
ensure_exists(target_folder)
subseqitems_snapshots = [
f.split(".")[0] for f in list_dir(f"{source_folder}/", ".edges.csv.gz")
] # fix
if snapshots:
subseqitems_snapshots = [s for s in subseqitems_snapshots if s in snapshots]
# get configs
mappings = [
dict(
pp_merge=pp_merge,
snapshot=subseqitems_snapshot,
)
for pp_merge in cluster_mapping_configs["pp_merges"]
for subseqitems_snapshot in subseqitems_snapshots
]
existing_files = set(list_dir(target_folder, ".pickle"))
if not overwrite:
mappings = [
mapping
for mapping in mappings
if filename_for_mapping(mapping) not in existing_files
]
return sorted(mappings, key=str)
def cd_cluster_evolution_mappings(
item, source_folder, preprocessed_graph_folder, target_folder
):
pattern = re.compile(
re.escape(item["snapshot"])
+ r"_[0-9\-]+_[0-9\-]+_"
+ re.escape(str(item["pp_merge"]).replace(".", "-"))
+ r".*\.gpickle\.gz"
)
filenames = sorted(
[
filename
for filename in os.listdir(preprocessed_graph_folder)
if pattern.fullmatch(filename)
]
)
if len(filenames) > 1:
print("Found multiple matching preprocessed graphs. Taking", filenames[0])
elif not filenames:
raise Exception("Not preprocessed graphs found for", pattern)
filename = filenames[0]
G = nx.read_gpickle(os.path.join(preprocessed_graph_folder, filename))
cluster_level_nodes = set(G.nodes())
del G
df_nodes = pd.read_csv(
os.path.join(source_folder, item["snapshot"] + ".nodes.csv.gz"),
dtype={"texts_tokens_n": str, "texts_chars_n": str},
)
df_edges = pd.read_csv(
os.path.join(source_folder, item["snapshot"] + ".edges.csv.gz")
)
containment_edges = df_edges[df_edges.edge_type == "containment"]
parents = {v: u for v, u in zip(containment_edges.v, containment_edges.u)}
items_mapping = {k: [] for k in cluster_level_nodes}
for key in df_nodes.key:
contracted_to = get_contracted_node(key, parents, cluster_level_nodes)
if contracted_to:
items_mapping[contracted_to].append(key)
node_seqitem_counts = Counter(
get_contracted_node(key, parents, cluster_level_nodes)
for key in df_nodes[df_nodes.type == "seqitem"].key
)
node_seqitem_counts = dict(node_seqitem_counts)
    tokens_n = {k: v for k, v in zip(df_nodes.key, df_nodes.tokens_n) if not pd.isna(v)}
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-f:
Filename of samples dictionary to be loaded. Default location is ~/data/interim/model_parameters/COVID19_SEIRD/calibrations/national/
Returns:
--------
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
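# Illustrative check (not in the original script): with pandas' shift semantics,
# crosscorr(a, b, lag) correlates a(t) with b(t - lag), so a peak at a positive lag
# means changes in b tend to precede changes in a. Toy example, assuming only pandas:
def _crosscorr_example():
    a = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    b = a.shift(2)                  # b lags a by two steps
    return crosscorr(a, b, lag=-2)  # recovers a correlation of 1.0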
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
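# Note (added comment, not in the original script): the loop below compares each peak
# cross-correlation against an approximate two-standard-error significance threshold of
# the form 2/sqrt(n - |tau|) and prints, for each pair of age groups, the lag of the
# peak, the peak value, and whether it clears the threshold.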
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
    sig = 2/np.sqrt(n-abs(tau))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependent parameter function
# ---------------------------------
# Import the contact matrix construction function and the compliance ramp
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
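# The policy function below switches between pre-pandemic contacts (all_contact) and the
# prevention-scaled contact matrix (contact_matrix_4prev). After the start of each lockdown
# (t1 and t5), behaviour remains unchanged for 'tau' days and then ramps linearly towards the
# new policy over 'l' days by means of ramp_fun.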
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to dates
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
t13 = pd.Timestamp('2021-04-18') # Easter holiday ends
# ------
# WAVE 1
# ------
if t <= t1:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 < t < t1 + tau_days:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 + tau_days < t <= t1 + tau_days + l_days:
t = pd.Timestamp(t.date())
policy_old = all_contact(t)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
elif t1 + tau_days + l_days < t <= t2:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t2 < t <= t3:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t3 < t <= t4:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
# ------
# WAVE 2
# ------
elif t4 < t <= t5 + tau_days:
return contact_matrix_4prev(t, school=1)
elif t5 + tau_days < t <= t5 + tau_days + l_days:
policy_old = contact_matrix_4prev(t, school=1)
        policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
                                          school=1)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
elif t5 + tau_days + l_days < t <= t6:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t6 < t <= t7:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t7 < t <= t8:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t8 < t <= t9:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t9 < t <= t10:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t10 < t <= t11:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t11 < t <= t12:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t12 < t <= t13:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
else:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependent parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
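# draw_fcn selects one random index from the posterior samples and assigns all correlated
# parameters (beta, da, omega and the compliance/prevention parameters) from that same index,
# so every simulated trajectory corresponds to one coherent posterior sample.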
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
LL = conf_int/2
UL = 1-conf_int/2
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
data = output[state_name].sum(dim="Nc").values
# Initialize vectors
vector = np.zeros((data.shape[1],n_draws_per_sample*n_samples))
    # Loop over the model draws and superimpose Poisson observation noise on every trajectory
    for n in range(data.shape[0]):
        poisson_draw = np.random.poisson(np.expand_dims(data[n,:], axis=1), size=(data.shape[1], n_draws_per_sample))
        vector[:, n*n_draws_per_sample:(n+1)*n_draws_per_sample] = poisson_draw
# Compute mean and median
mean = np.mean(vector,axis=1)
median = np.median(vector,axis=1)
# Compute quantiles
LL = np.quantile(vector, q = LL, axis = 1)
UL = np.quantile(vector, q = UL, axis = 1)
return mean, median, LL, UL
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim], color='red', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax = _apply_tick_locator(ax)
ax.set_xlim(start_calibration,end_sim)
ax.set_ylabel(state_label)
return ax
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all on one page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 3]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
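# Layout: one row per calibration end date; the first n_prevention columns contain the posterior
# histograms of the prevention parameters, the last (wider) column the model fit of the daily
# hospitalisations against the Sciensano data.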
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 0.5, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,300, 600])
axes[i,j].set_ylim([0,700])
plt.tight_layout()
plt.show()
model_results_WAVE1 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
#####################################
## PART 2: Hospitals vs. R0 figure ##
#####################################
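# compute_R0 evaluates, for every posterior sample, the basic reproduction number per age group i as
#   R0[i] = (a[i]*da + omega) * beta * sum_j Nc[i,j]
# and aggregates to one overall value by weighting every age group with its share in the population.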
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] * np.sum(Nc, axis=1)[i]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_overall = np.mean(np.sum(R0_norm,axis=0))
return R0, R0_overall
R0, R0_overall = compute_R0(initN, Nc_all['total'], samples_dicts[-1], params)
cumsum = out['H_in'].cumsum(dim='time').values
cumsum_mean = np.mean(cumsum[:,:,-1], axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_LL = cumsum_mean - np.quantile(cumsum[:,:,-1], q = 0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_UL = np.quantile(cumsum[:,:,-1], q = 1-0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0)) - cumsum_mean
cumsum = (out['H_in'].mean(dim="draws")).cumsum(dim='time').values
fraction = cumsum[:,-1]/sum(cumsum[:,-1])
fig,ax = plt.subplots(figsize=(12,4))
bars = ('$[0, 10[$', '$[10, 20[$', '$[20, 30[$', '$[30, 40[$', '$[40, 50[$', '$[50, 60[$', '$[60, 70[$', '$[70, 80[$', '$[80, \infty[$')
x_pos = np.arange(len(bars))
#ax.bar(x_pos, np.mean(R0,axis=1), yerr = [np.mean(R0,axis=1) - np.quantile(R0,q=0.05/2,axis=1), np.quantile(R0,q=1-0.05/2,axis=1) - np.mean(R0,axis=1)], width=1, color='b', alpha=0.5, capsize=10)
ax.bar(x_pos, np.mean(R0,axis=1), width=1, color='b', alpha=0.8)
ax.set_ylabel('$R_0$ (-)')
ax.grid(False)
ax2 = ax.twinx()
#ax2.bar(x_pos, cumsum_mean, yerr = [cumsum_LL, cumsum_UL], width=1,color='orange',alpha=0.9,hatch="/", capsize=10)
ax2.bar(x_pos, cumsum_mean, width=1,color='orange',alpha=0.6,hatch="/")
ax2.set_ylabel('Fraction of hospitalizations (-)')
ax2.grid(False)
plt.xticks(x_pos, bars)
plt.tight_layout()
plt.show()
#########################################
## Part 3: Robustness figure of WAVE 2 ##
#########################################
n_prevention = 4
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-06.json')), # 2020-11-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-05.json')), # 2020-11-16
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-04.json')), # 2020-12-24
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json')), # 2021-02-01
]
n_calibrations = len(samples_dicts)
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-09-01'
# Last datapoint used in inference
end_calibrations = ['2020-11-06','2020-11-16','2020-12-24','2021-02-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2021-02-14'
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependent parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Model initial condition on September 1st
warmup = 0
with open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/initial_states_2020-09-01.json', 'r') as fp:
initial_states = json.load(fp)
initial_states.update({
'VE': np.zeros(9),
'V': np.zeros(9),
'V_new': np.zeros(9),
'alpha': np.zeros(9)
})
#initial_states['ICU_tot'] = initial_states.pop('ICU')
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_schools'] = samples_dict['prev_schools'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all on one page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 1, 6]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{schools}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_schools', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, start_calibration = start_calibration, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,250, 500, 750])
axes[i,j].set_ylim([0,850])
plt.tight_layout()
plt.show()
model_results_WAVE2 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
model_results = [model_results_WAVE1, model_results_WAVE2]
#################################################################
## Part 4: Comparing the maximal dataset prevention parameters ##
#################################################################
samples_dict_WAVE1 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json'))
samples_dict_WAVE2 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json'))
labels = ['$\Omega_{schools}$','$\Omega_{work}$', '$\Omega_{rest}$', '$\Omega_{home}$']
keys = ['prev_schools','prev_work','prev_rest','prev_home']
fig,axes = plt.subplots(1,4,figsize=(12,4))
for idx,ax in enumerate(axes):
if idx != 0:
(n1, bins, patches) = ax.hist(samples_dict_WAVE1[keys[idx]],bins=15,color='blue',alpha=0.4, density=True)
(n2, bins, patches) =ax.hist(samples_dict_WAVE2[keys[idx]],bins=15,color='black',alpha=0.4, density=True)
max_n = max([max(n1),max(n2)])*1.10
ax.axvline(np.mean(samples_dict_WAVE1[keys[idx]]),ls=':',ymin=0,ymax=1,color='blue')
ax.axvline(np.mean(samples_dict_WAVE2[keys[idx]]),ls=':',ymin=0,ymax=1,color='black')
if idx ==1:
ax.annotate('$\mu_1 = \mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
else:
ax.annotate('$\mu_1 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.annotate('$\mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE2[keys[idx]])), xy=(np.mean(samples_dict_WAVE2[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.set_xlabel(labels[idx])
ax.set_yticks([])
ax.spines['left'].set_visible(False)
else:
ax.hist(samples_dict_WAVE2['prev_schools'],bins=15,color='black',alpha=0.6, density=True)
ax.set_xlabel('$\Omega_{schools}$')
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.set_xlim([0,1])
ax.xaxis.grid(False)
ax.yaxis.grid(False)
plt.tight_layout()
plt.show()
################################################################
## Part 5: Relative contributions of each contact: both waves ##
################################################################
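# For both waves, the average number of contacts per location (leisure/rest, work, home, schools)
# is reconstructed over time by scaling the contact matrices with the Google mobility reductions
# and the sampled prevention parameters: no prevention before the compliance delay tau, a linear
# ramp of length l, and full prevention afterwards. The resulting contacts are also converted into
# per-location contributions to the effective reproduction number via compute_R0.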
# --------------------------------
# Re-define function to compute R0
# --------------------------------
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] *Nc[i,j]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_mean = np.sum(R0_norm,axis=0)
return R0, R0_mean
# -----------------------
# Pre-allocate dataframes
# -----------------------
index=df_google.index
columns = [['1','1','1','1','1','1','1','1','1','1','1','1','1','1','1','2','2','2','2','2','2','2','2','2','2','2','2','2','2','2'],['work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL','rest_mean','rest_LL','rest_UL',
'home_mean','home_LL','home_UL','total_mean','total_LL','total_UL','work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL',
'rest_mean','rest_LL','rest_UL','home_mean','home_LL','home_UL','total_mean','total_LL','total_UL']]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["WAVE", "Type"])
data = np.zeros([len(df_google.index),30])
df_rel = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_abs = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_Re = pd.DataFrame(data=data, index=df_google.index, columns=columns)
samples_dicts = [samples_dict_WAVE1, samples_dict_WAVE2]
start_dates =[pd.to_datetime('2020-03-15'), pd.to_datetime('2020-10-19')]
waves=["1", "2"]
for j,samples_dict in enumerate(samples_dicts):
    print('\n WAVE: ' + waves[j] + '\n')
# ---------------
# Rest prevention
# ---------------
print('Rest\n')
data_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
Re_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_rest[idx,:] = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
new = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
data_rest[idx,:]= old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
new_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_rest[idx,:] = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_rest_mean = np.mean(Re_rest,axis=1)
Re_rest_LL = np.quantile(Re_rest,q=0.05/2,axis=1)
Re_rest_UL = np.quantile(Re_rest,q=1-0.05/2,axis=1)
# ---------------
# Work prevention
# ---------------
print('Work\n')
data_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_work[idx,:] = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
new = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.array(samples_dict['prev_work'])
data_work[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['work'][date])*(np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
new_contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_work[idx,:] = (0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0))))*np.array(samples_dict['prev_work'])
contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_work_mean = np.mean(Re_work,axis=1)
Re_work_LL = np.quantile(Re_work, q=0.05/2, axis=1)
Re_work_UL = np.quantile(Re_work, q=1-0.05/2, axis=1)
# ----------------
# Home prevention
# ----------------
print('Home\n')
data_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
Re_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.ones(len(samples_dict['prev_home']))
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
new = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
data_home[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(np.sum(Nc_home,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
new_contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_home_mean = np.mean(Re_home,axis=1)
Re_home_LL = np.quantile(Re_home, q=0.05/2, axis=1)
Re_home_UL = np.quantile(Re_home, q=1-0.05/2, axis=1)
# ------------------
# School prevention
# ------------------
if j == 0:
print('School\n')
data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-09-01'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_schools[idx,:] = 1 * (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work']) # This is wrong, but is never used
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif j == 1:
print('School\n')
data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_schools'])])
Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-11-16'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2020-11-16') < date <= pd.to_datetime('2020-12-18'):
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2020-12-18') < date <= pd.to_datetime('2021-01-04'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
                contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2021-01-04') < date <= pd.to_datetime('2021-02-15'):
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2021-02-15') < date <= pd.to_datetime('2021-02-21'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_schools_mean = np.mean(Re_schools,axis=1)
Re_schools_LL = np.quantile(Re_schools, q=0.05/2, axis=1)
Re_schools_UL = np.quantile(Re_schools, q=1-0.05/2, axis=1)
# -----
# Total
# -----
data_total = data_rest + data_work + data_home + data_schools
Re_total = Re_rest + Re_work + Re_home + Re_schools
Re_total_mean = np.mean(Re_total,axis=1)
Re_total_LL = np.quantile(Re_total, q=0.05/2, axis=1)
Re_total_UL = np.quantile(Re_total, q=1-0.05/2, axis=1)
# -----------------------
# Absolute contributions
# -----------------------
abs_rest = np.zeros(data_rest.shape)
abs_work = np.zeros(data_rest.shape)
abs_home = np.zeros(data_rest.shape)
abs_schools = np.zeros(data_schools.shape)
abs_total = data_total
for i in range(data_rest.shape[0]):
abs_rest[i,:] = data_rest[i,:]
abs_work[i,:] = data_work[i,:]
abs_home[i,:] = data_home[i,:]
abs_schools[i,:] = data_schools[i,:]
abs_schools_mean = np.mean(abs_schools,axis=1)
abs_schools_LL = np.quantile(abs_schools,LL,axis=1)
abs_schools_UL = np.quantile(abs_schools,UL,axis=1)
abs_rest_mean = np.mean(abs_rest,axis=1)
abs_rest_LL = np.quantile(abs_rest,LL,axis=1)
abs_rest_UL = np.quantile(abs_rest,UL,axis=1)
abs_work_mean = np.mean(abs_work,axis=1)
abs_work_LL = np.quantile(abs_work,LL,axis=1)
abs_work_UL = np.quantile(abs_work,UL,axis=1)
abs_home_mean = np.mean(abs_home,axis=1)
abs_home_LL = np.quantile(abs_home,LL,axis=1)
abs_home_UL = np.quantile(abs_home,UL,axis=1)
abs_total_mean = np.mean(abs_total,axis=1)
abs_total_LL = np.quantile(abs_total,LL,axis=1)
abs_total_UL = np.quantile(abs_total,UL,axis=1)
# -----------------------
# Relative contributions
# -----------------------
rel_rest = np.zeros(data_rest.shape)
rel_work = np.zeros(data_rest.shape)
rel_home = np.zeros(data_rest.shape)
rel_schools = np.zeros(data_schools.shape)
rel_total = np.zeros(data_schools.shape)
for i in range(data_rest.shape[0]):
total = data_schools[i,:] + data_rest[i,:] + data_work[i,:] + data_home[i,:]
rel_rest[i,:] = data_rest[i,:]/total
rel_work[i,:] = data_work[i,:]/total
rel_home[i,:] = data_home[i,:]/total
rel_schools[i,:] = data_schools[i,:]/total
rel_total[i,:] = total/total
rel_schools_mean = np.mean(rel_schools,axis=1)
rel_schools_LL = np.quantile(rel_schools,LL,axis=1)
rel_schools_UL = np.quantile(rel_schools,UL,axis=1)
rel_rest_mean = np.mean(rel_rest,axis=1)
rel_rest_LL = np.quantile(rel_rest,LL,axis=1)
rel_rest_UL = np.quantile(rel_rest,UL,axis=1)
rel_work_mean = np.mean(rel_work,axis=1)
rel_work_LL = np.quantile(rel_work,LL,axis=1)
rel_work_UL = np.quantile(rel_work,UL,axis=1)
rel_home_mean = np.mean(rel_home,axis=1)
rel_home_LL = np.quantile(rel_home,LL,axis=1)
rel_home_UL = np.quantile(rel_home,UL,axis=1)
rel_total_mean = np.mean(rel_total,axis=1)
rel_total_LL = np.quantile(rel_total,LL,axis=1)
rel_total_UL = np.quantile(rel_total,UL,axis=1)
# ---------------------
# Append to dataframe
# ---------------------
df_rel[waves[j],"work_mean"] = rel_work_mean
df_rel[waves[j],"work_LL"] = rel_work_LL
df_rel[waves[j],"work_UL"] = rel_work_UL
df_rel[waves[j], "rest_mean"] = rel_rest_mean
df_rel[waves[j], "rest_LL"] = rel_rest_LL
df_rel[waves[j], "rest_UL"] = rel_rest_UL
df_rel[waves[j], "home_mean"] = rel_home_mean
df_rel[waves[j], "home_LL"] = rel_home_LL
df_rel[waves[j], "home_UL"] = rel_home_UL
df_rel[waves[j],"schools_mean"] = rel_schools_mean
df_rel[waves[j],"schools_LL"] = rel_schools_LL
df_rel[waves[j],"schools_UL"] = rel_schools_UL
df_rel[waves[j],"total_mean"] = rel_total_mean
df_rel[waves[j],"total_LL"] = rel_total_LL
df_rel[waves[j],"total_UL"] = rel_total_UL
copy1 = df_rel.copy(deep=True)
df_Re[waves[j],"work_mean"] = Re_work_mean
df_Re[waves[j],"work_LL"] = Re_work_LL
df_Re[waves[j],"work_UL"] = Re_work_UL
df_Re[waves[j], "rest_mean"] = Re_rest_mean
df_Re[waves[j],"rest_LL"] = Re_rest_LL
df_Re[waves[j],"rest_UL"] = Re_rest_UL
df_Re[waves[j], "home_mean"] = Re_home_mean
df_Re[waves[j], "home_LL"] = Re_home_LL
df_Re[waves[j], "home_UL"] = Re_home_UL
df_Re[waves[j],"schools_mean"] = Re_schools_mean
df_Re[waves[j],"schools_LL"] = Re_schools_LL
df_Re[waves[j],"schools_UL"] = Re_schools_UL
df_Re[waves[j],"total_mean"] = Re_total_mean
df_Re[waves[j],"total_LL"] = Re_total_LL
df_Re[waves[j],"total_UL"] = Re_total_UL
copy2 = df_Re.copy(deep=True)
df_abs[waves[j],"work_mean"] = abs_work_mean
df_abs[waves[j],"work_LL"] = abs_work_LL
df_abs[waves[j],"work_UL"] = abs_work_UL
df_abs[waves[j], "rest_mean"] = abs_rest_mean
df_abs[waves[j], "rest_LL"] = abs_rest_LL
df_abs[waves[j], "rest_UL"] = abs_rest_UL
df_abs[waves[j], "home_mean"] = abs_home_mean
df_abs[waves[j], "home_LL"] = abs_home_LL
df_abs[waves[j], "home_UL"] = abs_home_UL
df_abs[waves[j],"schools_mean"] = abs_schools_mean
df_abs[waves[j],"schools_LL"] = abs_schools_LL
df_abs[waves[j],"schools_UL"] = abs_schools_UL
df_abs[waves[j],"total_mean"] = abs_total_mean
df_abs[waves[j],"total_LL"] = abs_total_LL
df_abs[waves[j],"total_UL"] = abs_total_UL
df_rel = copy1
df_Re = copy2
#df_abs.to_excel('test.xlsx', sheet_name='Absolute contacts')
#df_rel.to_excel('test.xlsx', sheet_name='Relative contacts')
#df_Re.to_excel('test.xlsx', sheet_name='Effective reproduction number')
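# Note: three consecutive to_excel calls on the same filename would each overwrite the file,
# so only the last sheet would survive. To export all three dataframes into a single workbook,
# one pd.ExcelWriter context can be used instead (sketch, kept disabled like the lines above):
#with pd.ExcelWriter('test.xlsx') as writer:
#    df_abs.to_excel(writer, sheet_name='Absolute contacts')
#    df_rel.to_excel(writer, sheet_name='Relative contacts')
#    df_Re.to_excel(writer, sheet_name='Effective reproduction number')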
print(np.mean(df_abs["1","total_mean"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]))
print(np.mean(df_Re["1","total_LL"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]),
np.mean(df_Re["1","total_mean"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]),
np.mean(df_Re["1","total_UL"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]))
print(np.mean(df_abs["1","total_mean"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]))
print(np.mean(df_Re["1","total_LL"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]),
np.mean(df_Re["1","total_mean"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]),
np.mean(df_Re["1","total_UL"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]))
print(np.mean(df_abs["2","total_mean"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]))
print(np.mean(df_Re["2","total_LL"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]),
np.mean(df_Re["2","total_mean"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]),
np.mean(df_Re["2","total_UL"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]))
print(np.mean(df_abs["2","total_mean"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]))
print(np.mean(df_Re["2","total_LL"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]),
np.mean(df_Re["2","total_mean"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]),
np.mean(df_Re["2","total_UL"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]))
# ----------------------------
# Plot absolute contributions
# ----------------------------
xlims = [[pd.to_datetime('2020-03-01'), pd.to_datetime('2020-07-14')],[pd.to_datetime('2020-09-01'), pd.to_datetime('2021-02-01')]]
no_lockdown = [[pd.to_datetime('2020-03-01'), pd.to_datetime('2020-03-15')],[pd.to_datetime('2020-09-01'), pd.to_datetime('2020-10-19')]]
fig,axes=plt.subplots(nrows=2,ncols=1,figsize=(12,7))
for idx,ax in enumerate(axes):
ax.plot(df_abs.index, df_abs[waves[idx],"rest_mean"], color='blue', linewidth=2)
ax.plot(df_abs.index, df_abs[waves[idx],"work_mean"], color='red', linewidth=2)
ax.plot(df_abs.index, df_abs[waves[idx],"home_mean"], color='green', linewidth=2)
ax.plot(df_abs.index, df_abs[waves[idx],"schools_mean"], color='orange', linewidth=2)
ax.plot(df_abs.index, df_abs[waves[idx],"total_mean"], color='black', linewidth=1.5)
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax.set_ylabel('Absolute contacts (-)')
if idx == 0:
ax.legend(['leisure','work','home','schools','total'], bbox_to_anchor=(1.20, 1), loc='upper left')
ax.set_xlim(xlims[idx])
ax.axvspan(no_lockdown[idx][0], no_lockdown[idx][1], alpha=0.2, color='black')
ax2 = ax.twinx()
time = model_results[idx]['time']
vector_mean = model_results[idx]['vector_mean']
vector_LL = model_results[idx]['vector_LL']
vector_UL = model_results[idx]['vector_UL']
ax2.scatter(df_sciensano.index,df_sciensano['H_in'],color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.plot(time,vector_mean,'--', color='black', linewidth=1.5)
ax2.fill_between(time,vector_LL, vector_UL,alpha=0.20, color = 'black')
ax2.xaxis.grid(False)
ax2.yaxis.grid(False)
ax2.set_xlim(xlims[idx])
ax2.set_ylabel('New hospitalisations (-)')
ax = _apply_tick_locator(ax)
ax2 = _apply_tick_locator(ax2)
plt.tight_layout()
plt.show()
plt.close()
# ----------------------------
# Plot relative contributions
# ----------------------------
fig,axes=plt.subplots(nrows=2,ncols=1,figsize=(12,7))
for idx,ax in enumerate(axes):
ax.plot(df_rel.index, df_rel[waves[idx],"rest_mean"], color='blue', linewidth=1.5)
ax.plot(df_rel.index, df_rel[waves[idx],"work_mean"], color='red', linewidth=1.5)
ax.plot(df_rel.index, df_rel[waves[idx],"home_mean"], color='green', linewidth=1.5)
ax.plot(df_rel.index, df_rel[waves[idx],"schools_mean"], color='orange', linewidth=1.5)
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax.set_ylabel('Relative contacts (-)')
if idx == 0:
ax.legend(['leisure','work','home','schools'], bbox_to_anchor=(1.20, 1), loc='upper left')
ax.set_xlim(xlims[idx])
ax.axvspan(no_lockdown[idx][0], no_lockdown[idx][1], alpha=0.2, color='black')
ax.set_yticks([0,0.25,0.50,0.75])
ax.set_ylim([0,0.85])
ax2 = ax.twinx()
time = model_results[idx]['time']
vector_mean = model_results[idx]['vector_mean']
vector_LL = model_results[idx]['vector_LL']
vector_UL = model_results[idx]['vector_UL']
ax2.scatter(df_sciensano.index,df_sciensano['H_in'],color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.plot(time,vector_mean,'--', color='black', linewidth=1.5)
ax2.fill_between(time,vector_LL, vector_UL,alpha=0.20, color = 'black')
ax2.xaxis.grid(False)
ax2.yaxis.grid(False)
ax2.set_xlim(xlims[idx])
ax2.set_ylabel('New hospitalisations (-)')
ax = _apply_tick_locator(ax)
ax2 = _apply_tick_locator(ax2)
plt.tight_layout()
plt.show()
plt.close()
# --------------------------------------------
# Plot relative contributions and cluster data
# --------------------------------------------
# Perform calculation
df_clusters = pd.read_csv('../../data/interim/sciensano/clusters.csv')
population_total = 11539326
population_schools = 2344395
population_work = 4893800 #https://stat.nbb.be/Index.aspx?DataSetCode=POPULA&lang=nl
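# The weekly cluster counts are scaled by the size of the corresponding subpopulation (total
# population for family/others, the working and school-aged populations for work and schools)
# and then normalized so that the four shares sum to one in every week.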
home_rel = df_clusters['family']/population_total
work_rel = df_clusters['work']/population_work
schools_rel = df_clusters['schools']/population_schools
others_rel = df_clusters['others']/population_total
normalizer = df_clusters['family']/population_total + df_clusters['work']/population_work + df_clusters['schools']/population_schools + df_clusters['others']/population_total
df_clusters['family_rel'] = df_clusters['family']/population_total/normalizer
df_clusters['work_rel'] = df_clusters['work']/population_work/normalizer
df_clusters['schools_rel'] = df_clusters['schools']/population_schools/normalizer
df_clusters['others_rel'] = df_clusters['others']/population_total/normalizer
df_clusters['midpoint_week'] = pd.to_datetime(df_clusters['startdate_week'])+(pd.to_datetime(df_clusters['enddate_week'])-pd.to_datetime(df_clusters['startdate_week']))/2
# Make plot
fig,ax = plt.subplots(figsize=(12,5))
# Cluster data
ax.plot(df_clusters['midpoint_week'], df_clusters['others_rel'], '--',color='blue',linewidth=1.5)
ax.plot(df_clusters['midpoint_week'], df_clusters['work_rel'],'--', color='red',linewidth=1.5)
ax.plot(df_clusters['midpoint_week'], df_clusters['family_rel'],'--',color='green',linewidth=1.5)
ax.plot(df_clusters['midpoint_week'], df_clusters['schools_rel'],'--', color='orange',linewidth=1.5)
# Model relative share
#ax.plot(df_rel.index, df_rel['2',"rest_mean"], color='blue', linewidth=1.5)
#ax.plot(df_rel.index, df_rel['2',"work_mean"], color='red', linewidth=1.5)
#ax.plot(df_rel.index, df_rel['2',"home_mean"], color='green', linewidth=1.5)
#ax.plot(df_rel.index, df_rel['2',"schools_mean"], color='orange', linewidth=1.5)
ax.legend(['others','work','home','schools'], bbox_to_anchor=(1.10, 1), loc='upper left')
ax.scatter(df_clusters['midpoint_week'], df_clusters['others_rel'], color='blue')
ax.scatter(df_clusters['midpoint_week'], df_clusters['work_rel'], color='red')
ax.scatter(df_clusters['midpoint_week'], df_clusters['family_rel'],color='green')
ax.scatter(df_clusters['midpoint_week'], df_clusters['schools_rel'], color='orange')
# Shading of no lockdown zone
ax.axvspan('2020-09-01', '2020-10-19', alpha=0.2, color='black')
# Other style options
ax.set_ylabel('Normalized share of clusters (-)')
ax.grid(False)
ax = _apply_tick_locator(ax)
ax.set_ylim([0,0.80])
ax.set_yticks([0,0.25,0.50,0.75])
ax2 = ax.twinx()
time = model_results[1]['time']
vector_mean = model_results[1]['vector_mean']
vector_LL = model_results[1]['vector_LL']
vector_UL = model_results[1]['vector_UL']
ax2.scatter(df_sciensano.index,df_sciensano['H_in'],color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.plot(time,vector_mean,'--', color='black', linewidth=1.5)
ax2.fill_between(time,vector_LL, vector_UL,alpha=0.20, color = 'black')
ax2.scatter(df_sciensano.index,df_sciensano['H_in'],color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.set_xlim(['2020-09-01', '2021-02-20'])
ax2.set_ylabel('New hospitalisations (-)')
ax2.grid(False)
ax2 = _apply_tick_locator(ax2)
plt.tight_layout()
plt.show()
# ------------------------------
# Plot Reproduction numbers (1)
# ------------------------------
xlims = [[pd.to_datetime('2020-03-01'), pd.to_datetime('2020-07-14')],[pd.to_datetime('2020-09-01'), pd.to_datetime('2021-02-01')]]
from gdsctools import *
import pandas as pd
import pylab
import json
import tempfile
import os
def test_regression_report():
directory = tempfile.mkdtemp()
os.chdir(directory)
os.mkdir("data")
os.mkdir("images")
IC = gdsctools_data("IC50_v5.csv.gz")
GF = gdsctools_data("genomic_features_v5.csv.gz")
PREFIX = "gdsctools_regression_"
IMAGE_DIR = "images"
DATA_PREFIX = "data/" + PREFIX
IMAGE_PREFIX = "images/" + PREFIX
gd = regression.GDSCLasso(IC, GF)
DRUGIDS = gd.drugIds[0:4]
config = {"boxplot_n":5, "randomness":5}
# Get best model
inputs = []
for drugid in DRUGIDS:
res = gd.runCV(drugid, verbose=False, kfolds=10)
bestmodel = gd.get_model(alpha=res.alpha)
def _pngname(tag):
return IMAGE_PREFIX + "%s_%s.png" % (tag, drugid)
# Plot weights
weights = gd.plot_weight(drugid, bestmodel)
if len(weights):
pylab.savefig(_pngname("weights"))
pylab.close()
        weights = pd.DataFrame({
            "weights": res.coefficients,
            "features": gd.feature_names})
output = DATA_PREFIX + "weights_{}.csv".format(drugid)
weights.to_csv(output, index=False)
# Plot importance
weights = gd.plot_importance(drugid, bestmodel)
if len(weights):
pylab.savefig(_pngname("importance"))
pylab.close()
# Boxplots
boxres = gd.boxplot(drugid, model=bestmodel, n=5,
bx_vert=False)
if len(boxres['data']):
pylab.savefig(_pngname("boxplot"))
pylab.close()
# Bayes factor
ran = gd.check_randomness(drugid, 10, 10)
pylab.savefig(_pngname("randomness"))
pylab.close()
results = {"drugid": int(drugid),
"Rp":res.Rp,
"alpha": res.alpha,
"ln_alpha": res.ln_alpha,
"ttest": ran['ttest_pval'],
"bayes":ran['bayes_factor']}
output = DATA_PREFIX + "results_{}.json".format(drugid)
fh = open(output, "w")
json.dump(results, fh)
fh.close()
inputs.append(output)
# gather all results:
data = []
for this in inputs:
with open(this, "r") as fh:
data.append(json.loads(fh.read()))
df = | pd.DataFrame(data) | pandas.DataFrame |