| prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
|---|---|---|
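Each row below appears to pair a code `prompt` that is cut off right before an API call, the masked `completion` (the call itself), and the fully-qualified `api` name of that call; the row boundaries render as `prompt | completion | api` at the end of each sample. A minimal sketch of how one visible row could be reassembled (the dict literal is illustrative, not taken verbatim from the dataset):

# Illustrative only: reassembling the first visible row ("tab = | pd.concat([result, expected], axis=1) | pandas.concat |").
row = {
    "prompt": "tab = ",                                      # code context truncated right before the call
    "completion": "pd.concat([result, expected], axis=1)",   # the masked API call to be predicted
    "api": "pandas.concat",                                  # fully-qualified name of the called API
}
print(row["prompt"] + row["completion"])  # -> tab = pd.concat([result, expected], axis=1)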
import datetime
import inspect
import logging
import numpy.testing as npt
import os.path
import pandas as pd
import pkgutil
import sys
from tabulate import tabulate
import unittest
try:
    from StringIO import StringIO
    from io import BytesIO  # BytesIO is used below for the pkgutil-loaded CSV bytes, even on Python 2
except ImportError:
    from io import StringIO, BytesIO
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..iec_exe import Iec, IecOutputs
#print(sys.path)
#print(os.path)
# load transposed qaqc data for inputs and expected outputs
# this works for both local nosetests and travis deploy
#input details
try:
if __package__ is not None:
csv_data = pkgutil.get_data(__package__, 'iec_qaqc_in_transpose.csv')
data_inputs = BytesIO(csv_data)
pd_obj_inputs = pd.read_csv(data_inputs, index_col=0, engine='python')
else:
csv_transpose_path_in = os.path.join(os.path.dirname(__file__),"iec_qaqc_in_transpose.csv")
print(csv_transpose_path_in)
pd_obj_inputs = pd.read_csv(csv_transpose_path_in, index_col=0, engine='python')
pd_obj_inputs['csrfmiddlewaretoken'] = '<PASSWORD>'
#with open('./iec_qaqc_in_transpose.csv') as f:
#csv_data = csv.reader(f)
finally:
pass
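# Added note (commentary, not in the original test): the pkgutil.get_data() branch resolves the
# CSV relative to the installed package, which is what the travis/CI run needs, while the
# os.path branch resolves it relative to this file for direct local runs; BytesIO wraps the
# raw bytes so pd.read_csv can read them either way.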
#print('iec inputs')
#print('iec input dimensions ' + str(pd_obj_inputs.shape))
#print('iec input keys ' + str(pd_obj_inputs.columns.values.tolist()))
#print(pd_obj_inputs)
# load transposed qaqc data for expected outputs
# works for local nosetests from parent directory
# but not for travis container that calls nosetests:
# csv_transpose_path_exp = "./terrplant_qaqc_exp_transpose.csv"
# pd_obj_exp_out = pd.read_csv(csv_transpose_path_exp, index_col=0, engine='python')
# print(pd_obj_exp_out)
# this works for both local nosetests and travis deploy
#expected output details
try:
if __package__ is not None:
data_exp_outputs = BytesIO(pkgutil.get_data(__package__, 'iec_qaqc_exp_transpose.csv'))
pd_obj_exp = pd.read_csv(data_exp_outputs, index_col=0, engine= 'python')
#print("iec expected outputs")
#print('iec expected output dimensions ' + str(pd_obj_exp.shape))
#print('iec expected output keys ' + str(pd_obj_exp.columns.values.tolist()))
else:
#csv_transpose_path_exp = "./iec_qaqc_exp_transpose.csv"
csv_transpose_path_exp = os.path.join(os.path.dirname(__file__),"iec_qaqc_exp_transpose.csv")
#print(csv_transpose_path_exp)
pd_obj_exp = pd.read_csv(csv_transpose_path_exp, index_col=0, engine='python')
finally:
pass
#print('iec expected')
# create an instance of iec object with qaqc data
#print("####")
#print("dead here")
iec_output_empty = IecOutputs()
iec_calc = Iec(pd_obj_inputs, pd_obj_exp)
iec_calc.execute_model()
inputs_json, outputs_json, exp_out_json = iec_calc.get_dict_rep()
#print("iec output")
#print(inputs_json)
#
#print(tabulate(pd_obj_inputs.iloc[:,0:5], headers='keys', tablefmt='fancy_grid'))
#print(tabulate(pd_obj_inputs.iloc[:,6:11], headers='keys', tablefmt='fancy_grid'))
#print(tabulate(pd_obj_inputs.iloc[:,12:17], headers='keys', tablefmt='fancy_grid'))
#
#print(tabulate(pd_obj_exp.iloc[:,0:1], headers='keys', tablefmt='fancy_grid'))
#logging.info("###iec_calc.pd_obj_exp")
#logging.info(iec_calc.pd_obj_exp)
test = {}
class TestIec(unittest.TestCase):
"""
Integration tests for the IEC model.
"""
print("iec integration tests conducted at " + str(datetime.datetime.today()))
def __init__(self, *args, **kwargs):
"""
Extend the TestCase constructor via super() and record the number of QAQC test cases.
:param args:
:param kwargs:
:return:
"""
super(TestIec, self).__init__(*args, **kwargs)
self.ncases = len(pd_obj_inputs)
def setUp(self):
"""
Test setup method.
:return:
"""
pass
def tearDown(self):
"""
Teardown called after each test; e.g., test results could be written to a text file here.
:return:
"""
def test_assert_output_series(self):
""" Verify that each output variable is a pd.Series """
try:
num_variables = len(iec_calc.pd_obj_out.columns)
result = pd.Series(False, index=list(range(num_variables)), dtype='bool')
expected = pd.Series(True, index=list(range(num_variables)), dtype='bool')
for i in range(num_variables):
column_name = iec_calc.pd_obj_out.columns[i]
output = getattr(iec_calc, column_name)
if isinstance(output, pd.Series):
result[i] = True
tab = pd.concat([result,expected], axis=1)
print('model output properties as pandas series')
print(tabulate(tab, headers='keys', tablefmt='fancy_grid'))
npt.assert_array_equal(result, expected)
finally:
pass
return
def test_assert_output_series_dtypes(self):
""" Verify that each output variable is the correct dtype """
try:
num_variables = len(iec_calc.pd_obj_out.columns)
#get the dtype name strings for the expected output and the resulting output and compare them
result = pd.Series(False, index=list(range(num_variables)), dtype='bool')
expected = pd.Series(True, index=list(range(num_variables)), dtype='bool')
for i in range(num_variables):
column_name = iec_calc.pd_obj_out.columns[i]
output_result = getattr(iec_calc, column_name)
column_dtype_result = output_result.dtype.name
output_expected = getattr(iec_output_empty, column_name)
output_expected2 = getattr(iec_calc.pd_obj_out, column_name)
column_dtype_expected = output_expected.dtype.name
if column_dtype_result == column_dtype_expected:
result[i] = True
#tab = pd.concat([result,expected], axis=1)
if(result[i] != expected[i]):
print(i)
print(column_name)
print(str(result[i]) + "/" + str(expected[i]))
print(column_dtype_result + "/" + column_dtype_expected)
print('result')
print(output_result)
print('expected')
print(output_expected2)
#print(tabulate(tab, headers='keys', tablefmt='fancy_grid'))
npt.assert_array_equal(result, expected)
finally:
pass
return
def test_iec_integration_z_score_f(self):
"""
integration test for output iec.z_score_f
"""
try:
func_name = inspect.currentframe().f_code.co_name
self.blackbox_method_int('z_score_f', func_name)
finally:
pass
return
def test_iec_integration_f8_f(self):
"""
integration test for output iec.f8_f
"""
try:
func_name = inspect.currentframe().f_code.co_name
self.blackbox_method_int('f8_f', func_name)
finally:
pass
return
def test_iec_integration_chance_f(self):
"""
integration test for output iec.chance_f
"""
try:
func_name = inspect.currentframe().f_code.co_name
self.blackbox_method_int('chance_f', func_name)
finally:
pass
return
def blackbox_method_int(self, output, func_name):
"""
Helper method to reuse code for testing numpy array outputs from the IEC model
:param output: String; Pandas Series name (e.g. column name) without the 'out_' prefix
:param func_name: name of the calling test method (used for logging)
:return:
"""
try:
# display model output in scientific notation
pd.set_option('display.float_format','{:.4E}'.format)
logging.info('### blackbox out_' + output)
logging.info(iec_calc.pd_obj_out)
result = iec_calc.pd_obj_out["out_" + output]
expected = iec_calc.pd_obj_exp["exp_" + output]
tab = | pd.concat([result, expected], axis=1) | pandas.concat |
import pickle
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
from sklearn.model_selection import StratifiedKFold
from skopt.space import Categorical
from evalml import AutoMLSearch
from evalml.automl.pipeline_search_plots import SearchIterationPlot
from evalml.exceptions import PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
FraudCost,
Precision,
PrecisionMicro,
Recall,
get_objective
)
from evalml.pipelines import (
GeneratedPipelineBinary,
GeneratedPipelineMulticlass,
GeneratedPipelineTimeSeriesBinary,
GeneratedPipelineTimeSeriesMulticlass,
ModeBaselineBinaryPipeline,
ModeBaselineMulticlassPipeline,
MulticlassClassificationPipeline,
PipelineBase,
TimeSeriesBaselineBinaryPipeline,
TimeSeriesBaselineMulticlassPipeline
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import TimeSeriesSplit, split_data
from evalml.problem_types import ProblemTypes
def test_init(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', max_iterations=1, n_jobs=1)
automl.search()
assert automl.n_jobs == 1
assert isinstance(automl.rankings, pd.DataFrame)
assert isinstance(automl.best_pipeline, PipelineBase)
automl.best_pipeline.predict(X)
# test with dataframes
automl = AutoMLSearch(pd.DataFrame(X), pd.Series(y), problem_type='binary', max_iterations=1, n_jobs=1)
automl.search()
assert isinstance(automl.rankings, pd.DataFrame)
assert isinstance(automl.full_rankings, pd.DataFrame)
assert isinstance(automl.best_pipeline, PipelineBase)
assert isinstance(automl.get_pipeline(0), PipelineBase)
assert automl.objective.name == 'Log Loss Binary'
automl.best_pipeline.predict(X)
def test_init_objective(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective=Precision(), max_iterations=1)
assert isinstance(automl.objective, Precision)
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='Precision', max_iterations=1)
assert isinstance(automl.objective, Precision)
def test_get_pipeline_none(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary')
with pytest.raises(PipelineNotFoundError, match="Pipeline not found"):
automl.describe_pipeline(0)
def test_data_splitter(X_y_binary):
X, y = X_y_binary
cv_folds = 5
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', data_splitter=StratifiedKFold(cv_folds), max_iterations=1,
n_jobs=1)
automl.search()
assert isinstance(automl.rankings, pd.DataFrame)
assert len(automl.results['pipeline_results'][0]["cv_data"]) == cv_folds
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', data_splitter=TimeSeriesSplit(n_splits=cv_folds),
max_iterations=1, n_jobs=1)
automl.search()
assert isinstance(automl.rankings, pd.DataFrame)
assert len(automl.results['pipeline_results'][0]["cv_data"]) == cv_folds
def test_max_iterations(X_y_binary):
X, y = X_y_binary
max_iterations = 5
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', max_iterations=max_iterations, n_jobs=1)
automl.search()
assert len(automl.full_rankings) == max_iterations
def test_recall_error(X_y_binary):
X, y = X_y_binary
# Recall is a valid objective but it's not allowed in AutoML so a ValueError is expected
error_msg = 'recall is not allowed in AutoML!'
with pytest.raises(ValueError, match=error_msg):
AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='recall', max_iterations=1)
def test_recall_object(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective=Recall(), max_iterations=1, n_jobs=1)
automl.search()
assert len(automl.full_rankings) > 0
assert automl.objective.name == 'Recall'
def test_binary_auto(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective="Log Loss Binary", max_iterations=5, n_jobs=1)
automl.search()
best_pipeline = automl.best_pipeline
assert best_pipeline._is_fitted
y_pred = best_pipeline.predict(X)
assert len(np.unique(y_pred.to_series())) == 2
def test_multi_auto(X_y_multi, multiclass_core_objectives):
X, y = X_y_multi
objective = PrecisionMicro()
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='multiclass', objective=objective, max_iterations=5, n_jobs=1)
automl.search()
best_pipeline = automl.best_pipeline
assert best_pipeline._is_fitted
y_pred = best_pipeline.predict(X)
assert len(np.unique(y_pred.to_series())) == 3
objective_in_additional_objectives = next((obj for obj in multiclass_core_objectives if obj.name == objective.name), None)
multiclass_core_objectives.remove(objective_in_additional_objectives)
for expected, additional in zip(multiclass_core_objectives, automl.additional_objectives):
assert type(additional) is type(expected)
def test_multi_objective(X_y_multi):
X, y = X_y_multi
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective="Log Loss Binary")
assert automl.problem_type == ProblemTypes.BINARY
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='multiclass', objective="Log Loss Multiclass")
assert automl.problem_type == ProblemTypes.MULTICLASS
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='multiclass', objective='AUC Micro')
assert automl.problem_type == ProblemTypes.MULTICLASS
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='AUC')
assert automl.problem_type == ProblemTypes.BINARY
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='multiclass')
assert automl.problem_type == ProblemTypes.MULTICLASS
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary')
assert automl.problem_type == ProblemTypes.BINARY
def test_categorical_classification(X_y_categorical_classification):
X, y = X_y_categorical_classification
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective="precision", max_iterations=5, n_jobs=1)
automl.search()
assert not automl.rankings['score'].isnull().all()
def test_random_seed(X_y_binary):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective=Precision(), max_iterations=5, random_seed=0, n_jobs=1)
automl.search()
automl_1 = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective=Precision(), max_iterations=5, random_seed=0, n_jobs=1)
automl_1.search()
assert automl.rankings.equals(automl_1.rankings)
def test_callback(X_y_binary):
X, y = X_y_binary
counts = {
"start_iteration_callback": 0,
"add_result_callback": 0,
}
def start_iteration_callback(pipeline_class, parameters, automl_obj, counts=counts):
counts["start_iteration_callback"] += 1
def add_result_callback(results, trained_pipeline, automl_obj, counts=counts):
counts["add_result_callback"] += 1
max_iterations = 3
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective=Precision(), max_iterations=max_iterations,
start_iteration_callback=start_iteration_callback,
add_result_callback=add_result_callback,
n_jobs=1)
automl.search()
assert counts["start_iteration_callback"] == max_iterations
assert counts["add_result_callback"] == max_iterations
def test_additional_objectives(X_y_binary):
X, y = X_y_binary
objective = FraudCost(retry_percentage=.5,
interchange_fee=.02,
fraud_payout_percentage=.75,
amount_col=10)
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='F1', max_iterations=2, additional_objectives=[objective],
n_jobs=1)
automl.search()
results = automl.describe_pipeline(0, return_dict=True)
assert 'Fraud Cost' in list(results["cv_data"][0]["all_objective_scores"].keys())
@patch('evalml.objectives.BinaryClassificationObjective.optimize_threshold')
@patch('evalml.pipelines.BinaryClassificationPipeline.predict_proba')
@patch('evalml.pipelines.BinaryClassificationPipeline.score')
@patch('evalml.pipelines.BinaryClassificationPipeline.fit')
def test_optimizable_threshold_enabled(mock_fit, mock_score, mock_predict_proba, mock_optimize_threshold, X_y_binary, caplog):
mock_optimize_threshold.return_value = 0.8
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='precision', max_iterations=1, optimize_thresholds=True)
mock_score.return_value = {automl.objective.name: 1.0}
automl.search()
mock_fit.assert_called()
mock_score.assert_called()
mock_predict_proba.assert_called()
mock_optimize_threshold.assert_called()
assert automl.best_pipeline.threshold is not None
assert automl.results['pipeline_results'][0]['cv_data'][0].get('binary_classification_threshold') == 0.8
assert automl.results['pipeline_results'][0]['cv_data'][1].get('binary_classification_threshold') == 0.8
assert automl.results['pipeline_results'][0]['cv_data'][2].get('binary_classification_threshold') == 0.8
automl.describe_pipeline(0)
out = caplog.text
assert "Objective to optimize binary classification pipeline thresholds for" in out
@patch('evalml.objectives.BinaryClassificationObjective.optimize_threshold')
@patch('evalml.pipelines.BinaryClassificationPipeline.predict_proba')
@patch('evalml.pipelines.BinaryClassificationPipeline.score')
@patch('evalml.pipelines.BinaryClassificationPipeline.fit')
def test_optimizable_threshold_disabled(mock_fit, mock_score, mock_predict_proba, mock_optimize_threshold, X_y_binary):
mock_optimize_threshold.return_value = 0.8
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='precision', max_iterations=1, optimize_thresholds=False)
mock_score.return_value = {automl.objective.name: 1.0}
automl.search()
mock_fit.assert_called()
mock_score.assert_called()
assert not mock_predict_proba.called
assert not mock_optimize_threshold.called
assert automl.best_pipeline.threshold is not None
assert automl.results['pipeline_results'][0]['cv_data'][0].get('binary_classification_threshold') == 0.5
assert automl.results['pipeline_results'][0]['cv_data'][1].get('binary_classification_threshold') == 0.5
assert automl.results['pipeline_results'][0]['cv_data'][2].get('binary_classification_threshold') == 0.5
@patch('evalml.pipelines.BinaryClassificationPipeline.score')
@patch('evalml.pipelines.BinaryClassificationPipeline.fit')
def test_non_optimizable_threshold(mock_fit, mock_score, X_y_binary):
mock_score.return_value = {"AUC": 1.0}
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='AUC', max_iterations=1)
automl.search()
mock_fit.assert_called()
mock_score.assert_called()
assert automl.best_pipeline.threshold is None
assert automl.results['pipeline_results'][0]['cv_data'][0].get('binary_classification_threshold') is None
assert automl.results['pipeline_results'][0]['cv_data'][1].get('binary_classification_threshold') is None
assert automl.results['pipeline_results'][0]['cv_data'][2].get('binary_classification_threshold') is None
def test_describe_pipeline_objective_ordered(X_y_binary, caplog):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='AUC', max_iterations=2, n_jobs=1)
automl.search()
automl.describe_pipeline(0)
out = caplog.text
out_stripped = " ".join(out.split())
objectives = [get_objective(obj) for obj in automl.additional_objectives]
objectives_names = [obj.name for obj in objectives]
expected_objective_order = " ".join(objectives_names)
assert expected_objective_order in out_stripped
def test_max_time_units(X_y_binary):
X, y = X_y_binary
str_max_time = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='F1', max_time='60 seconds')
assert str_max_time.max_time == 60
hour_max_time = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='F1', max_time='1 hour')
assert hour_max_time.max_time == 3600
min_max_time = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='F1', max_time='30 mins')
assert min_max_time.max_time == 1800
min_max_time = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='F1', max_time='30 s')
assert min_max_time.max_time == 30
with pytest.raises(AssertionError, match="Invalid unit. Units must be hours, mins, or seconds. Received 'year'"):
AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='F1', max_time='30 years')
with pytest.raises(TypeError, match="Parameter max_time must be a float, int, string or None. Received <class 'tuple'> with value \\(30, 'minutes'\\)."):
AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective='F1', max_time=(30, 'minutes'))
def test_plot_disabled_missing_dependency(X_y_binary, has_minimal_dependencies):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', max_iterations=3)
if has_minimal_dependencies:
with pytest.raises(AttributeError):
automl.plot.search_iteration_plot
else:
automl.plot.search_iteration_plot
def test_plot_iterations_max_iterations(X_y_binary):
go = pytest.importorskip('plotly.graph_objects', reason='Skipping plotting test because plotly not installed')
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective="f1", max_iterations=3, n_jobs=1)
automl.search()
plot = automl.plot.search_iteration_plot()
plot_data = plot.data[0]
x = pd.Series(plot_data['x'])
y = pd.Series(plot_data['y'])
assert isinstance(plot, go.Figure)
assert x.is_monotonic_increasing
assert y.is_monotonic_increasing
assert len(x) == 3
assert len(y) == 3
def test_plot_iterations_max_time(X_y_binary):
go = pytest.importorskip('plotly.graph_objects', reason='Skipping plotting test because plotly not installed')
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', objective="f1", max_time=10, n_jobs=1)
automl.search(show_iteration_plot=False)
plot = automl.plot.search_iteration_plot()
plot_data = plot.data[0]
x = pd.Series(plot_data['x'])
y = | pd.Series(plot_data['y']) | pandas.Series |
from collections import OrderedDict
import contextlib
from datetime import datetime, time
from functools import partial
import os
from urllib.error import URLError
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
message="time.clock has been deprecated",
category=DeprecationWarning,
)
yield
read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
return True
def _transfer_marks(engine, read_ext):
"""
engine gives us a pytest.param object with some marks, read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
return new_param
@pytest.fixture(
autouse=True,
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
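# Added illustration (commentary, not part of the test module): after _is_valid_engine_ext_pair
# filtering, the fixture above parametrizes these (engine, read_ext) pairs:
#   ("xlrd", ".xls"), ("xlrd", ".xlsx"), ("xlrd", ".xlsm"),
#   ("openpyxl", ".xlsx"), ("openpyxl", ".xlsm"),
#   (None, ".xls"), (None, ".xlsx"), (None, ".xlsm"),
#   ("odf", ".ods")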
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_usecols_int(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3)
# usecols as int
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3
)
def test_usecols_list(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3]
)
# TODO add index to xls file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A:D"
)
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D"
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D"
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"]
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"])
expected = DataFrame(
columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, read_ext, index_col):
# see gh-18792
result = pd.read_excel("test1" + read_ext, "Sheet4", index_col=index_col)
expected = DataFrame(
[["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
)
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, read_ext):
msg = (
"Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]"
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E"])
def test_usecols_wrong_type(self, read_ext):
msg = (
"'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable."
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, read_ext):
parsed = pd.read_excel("test2" + read_ext, "Sheet1")
expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, read_ext):
parsed = pd.read_excel("test3" + read_ext, "Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self, read_ext, df_ref):
df1 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1], index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
df3 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, read_ext):
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
(
"DateCol",
[
datetime(2013, 10, 30),
datetime(2013, 10, 31),
datetime(1905, 1, 1),
datetime(2013, 12, 14),
datetime(2015, 3, 14),
],
),
]
)
)
basename = "test_types"
# should read in correctly and infer types
actual = pd.read_excel(basename + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = pd.read_excel(basename + read_ext, "Sheet1", convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = pd.read_excel(basename + read_ext, "Sheet1", index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext, "Sheet1", converters={"StrCol": str}
)
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext,
"Sheet1",
convert_float=False,
converters={"StrCol": str},
)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, read_ext):
basename = "test_converters"
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ["Found", "Found", "Found", "Not found", "Found"]),
("StrCol", ["1", np.nan, "3", "4", "5"]),
]
)
)
converters = {
"IntCol": lambda x: int(x) if x != "" else -1000,
"FloatCol": lambda x: 10 * x if x else np.nan,
2: lambda x: "Found" if x != "" else "Not found",
3: lambda x: str(x) if x else "",
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = pd.read_excel(basename + read_ext, "Sheet1", converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, read_ext):
# GH 8212
basename = "testdtype"
actual = pd.read_excel(basename + read_ext)
expected = DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
).reindex(columns=["a", "b", "c", "d"])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
)
expected["a"] = expected["a"].astype("float64")
expected["b"] = expected["b"].astype("float32")
expected["c"] = ["001", "002", "003", "004"]
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
pd.read_excel(basename + read_ext, dtype={"d": "int64"})
@pytest.mark.parametrize(
"dtype,expected",
[
(
None,
DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
),
),
(
{"a": "float64", "b": "float32", "c": str, "d": str},
DataFrame(
{
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"],
}
),
),
],
)
def test_reader_dtype_str(self, read_ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = "test_multisheet"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ["Charlie", "Alpha", "Beta"]
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, read_ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = "test_multisheet"
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, "Charlie", "Charlie"]
dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = "blank_with_header"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, read_ext):
actual = pd.read_excel("blank" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
actual = pd.read_excel("blank_with_header" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_date_conversion_overflow(self, read_ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame(
[
[pd.Timestamp("2016-03-12"), "<NAME>"],
[pd.Timestamp("2016-03-16"), "<NAME>"],
[1e20, "<NAME>"],
],
columns=["DateColWithBigInt", "StringCol"],
)
if pd.read_excel.keywords["engine"] == "openpyxl":
pytest.xfail("Maybe not supported by openpyxl")
result = pd.read_excel("testdateoverflow" + read_ext)
tm.assert_frame_equal(result, expected)
def test_sheet_name(self, read_ext, df_ref):
filename = "test1"
sheet_name = "Sheet1"
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
) # doc
with ignore_xlrd_time_clock_warning():
df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_excel_read_buffer(self, read_ext):
pth = "test1" + read_ext
expected = pd.read_excel(pth, "Sheet1", index_col=0)
with open(pth, "rb") as f:
actual = pd.read_excel(f, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_bad_engine_raises(self, read_ext):
bad_engine = "foo"
with pytest.raises(ValueError, match="Unknown engine: foo"):
pd.read_excel("", engine=bad_engine)
@tm.network
def test_read_from_http_url(self, read_ext):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/excel/test1" + read_ext
)
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_not_us_locale
def test_read_from_s3_url(self, read_ext, s3_resource):
# Bucket "pandas-test" created in tests/io/conftest.py
with open("test1" + read_ext, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test1" + read_ext, Body=f)
url = "s3://pandas-test/test1" + read_ext
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
# ignore warning from old xlrd
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
def test_read_from_file_url(self, read_ext, datapath):
# FILE
localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext)
local_table = pd.read_excel(localtable)
try:
url_table = pd.read_excel("file://localhost/" + localtable)
except URLError:
# fails on some systems
import platform
pytest.skip("failing on {}".format(" ".join(platform.uname()).strip()))
tm.assert_frame_equal(url_table, local_table)
def test_read_from_pathlib_path(self, read_ext):
# GH12655
from pathlib import Path
str_path = "test1" + read_ext
expected = pd.read_excel(str_path, "Sheet1", index_col=0)
path_obj = Path("test1" + read_ext)
actual = pd.read_excel(path_obj, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
@td.check_file_leaks
def test_read_from_py_localpath(self, read_ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join("test1" + read_ext)
expected = pd.read_excel(str_path, "Sheet1", index_col=0)
path_obj = LocalPath().join("test1" + read_ext)
actual = pd.read_excel(path_obj, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_seconds(self, read_ext):
# Test reading times with and without milliseconds. GH5945.
expected = DataFrame.from_dict(
{
"Time": [
time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54),
]
}
)
actual = pd.read_excel("times_1900" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel("times_1904" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, read_ext):
# see gh-4679
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = "testmultiindex" + read_ext
# "mi_column" sheet
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=mi,
)
actual = pd.read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# "mi_index" sheet
expected.index = mi
expected.columns = ["a", "b", "c", "d"]
actual = pd.read_excel(mi_file, "mi_index", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "both" sheet
expected.columns = mi
actual = pd.read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "mi_index_name" sheet
expected.columns = ["a", "b", "c", "d"]
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, "mi_index_name", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# "mi_column_name" sheet
expected.index = list(range(4))
expected.columns = mi.set_names(["c1", "c2"])
actual = pd.read_excel(mi_file, "mi_column_name", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# see gh-11317
# "name_with_int" sheet
expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"])
actual = pd.read_excel(mi_file, "name_with_int", index_col=0, header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_name" sheet
expected.columns = mi.set_names(["c1", "c2"])
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, "both_name", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_skiprows" sheet
actual = pd.read_excel(
mi_file, "both_name_skiprows", index_col=[0, 1], header=[0, 1], skiprows=2
)
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex_header_only(self, read_ext):
# see gh-11733.
#
# Don't try to parse a header name if there isn't one.
mi_file = "testmultiindex" + read_ext
result = pd.read_excel(mi_file, "index_col_none", header=[0, 1])
exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
tm.assert_frame_equal(result, expected)
def test_excel_old_index_format(self, read_ext):
# see gh-4679
filename = "test_index_name_pre17" + read_ext
# We detect headers to determine if index names exist, so
# that "index" name in the "names" version of the data will
# now be interpreted as rows that include null data.
data = np.array(
[
[None, None, None, None, None],
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
]
)
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(
levels=[
["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
],
codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
names=[None, None],
)
si = Index(
["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None
)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(filename, "single_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(filename, "multi_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# The analogous versions of the "names" version data
# where there are explicitly no names for the indices.
data = np.array(
[
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
]
)
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(
levels=[
["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
],
codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
names=[None, None],
)
si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(filename, "single_no_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(filename, "multi_no_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, read_ext):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
pd.read_excel("test1" + read_ext, header=arg)
def test_read_excel_chunksize(self, read_ext):
# GH 8011
with pytest.raises(NotImplementedError):
pd.read_excel("test1" + read_ext, chunksize=100)
def test_read_excel_skiprows_list(self, read_ext):
# GH 4903
actual = pd.read_excel(
"testskiprows" + read_ext, "skiprows_list", skiprows=[0, 2]
)
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=["a", "b", "c", "d"],
)
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
"testskiprows" + read_ext, "skiprows_list", skiprows=np.array([0, 2])
)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, read_ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
expected = pd.read_excel("test1" + read_ext)
expected = expected[:num_rows_to_pull]
| tm.assert_frame_equal(actual, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
from ..data import Data, DataSamples
from ..cross import DecisionTree, Crosses
#from ..woe import WOE
import pandas as pd
#import math as m
import numpy as np
import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split, GridSearchCV, PredefinedSplit
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, auc, r2_score
from scipy.stats import chi2, chisquare, ks_2samp, ttest_ind
#import statsmodels.formula.api as sm
import warnings
from abc import ABCMeta, abstractmethod
#from sklearn.feature_selection import GenericUnivariateSelect, f_classif
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
import re
import ast
import os
import xlsxwriter
from PIL import Image
import datetime
from dateutil.relativedelta import *
from scipy.optimize import minimize
import copy
import itertools
import calendar
warnings.simplefilter('ignore')
plt.rc('font', family='Verdana')
plt.style.use('seaborn-darkgrid')
pd.set_option('display.precision', 3)
class ScoringModel(metaclass = ABCMeta):
'''
Base class for binary scoring models
'''
@abstractmethod
def __init__(self, model):
self.model = model
self.features = []
@abstractmethod
def fit(self, data):
pass
def predict(self, data, woe_transform=None):
'''
Predicts probability of target = 1
Parameters
-----------
data: Data to use for prediction, Data type
woe_transform: a WOE object to perform WoE-transformation before using model
Returns
-----------
matrix with shape [(sample size) X (number of classes)]
'''
if woe_transform is not None:
data=woe_transform.transform(data, keep_essential=True, original_values=True, calc_gini=False)
if self.features == []:
self.features = data.features
return self.model.predict_proba(data.dataframe[self.features])
def roc_curve(self, data, woe_transform=None, figsize=(10,7), filename = 'roc_curve', verbose = True):
'''
Displays ROC-curve and Gini coefficient for the model
Parameters
-----------
data: a Data or DataSamples object
woe_transform: a WOE object to perform WoE-transformation before using model
figsize: a tuple for graph size
filename: name of the picture with roc_curve
verbose: show/not show roc_curve in output
Returns
----------
a list of gini values per input sample
'''
if woe_transform is not None:
data=woe_transform.transform(data, keep_essential=True, original_values=True, calc_gini=False)
tpr={}
fpr={}
roc_auc={}
if type(data)==DataSamples:
samples=[data.train, data.validate, data.test]
sample_names=['Train', 'Validate', 'Test']
for si in range(len(samples)):
if samples[si] is not None:
preds = self.predict(samples[si])[:,1]
fpr[samples[si].name], tpr[samples[si].name], _ = roc_curve(samples[si].dataframe[samples[si].target], preds)
roc_auc[samples[si].name] = auc(fpr[samples[si].name], tpr[samples[si].name])
else:
fpr[sample_names[si]]=None
tpr[sample_names[si]]=None
roc_auc[sample_names[si]]=None
else:
preds = self.predict(data)[:,1]
fpr['Data'], tpr['Data'], _ = roc_curve(data.dataframe[data.target], preds)
roc_auc['Data'] = auc(fpr['Data'], tpr['Data'])
if verbose or (filename is not None):
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# Plot tpr vs 1-fpr
for sample in roc_auc:
if roc_auc[sample] is not None:
ax.plot(fpr[sample], tpr[sample], label=sample+' (AUC = %f)' % roc_auc[sample])
ax.plot(tpr[list(tpr)[0]],tpr[list(tpr)[0]])
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.legend()
if filename is not None:
plt.savefig(filename + ".png", dpi=100, bbox_inches='tight')
if verbose:
plt.show()
if verbose or (filename is not None):
plt.close()
ginis=[]
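# Gini below is derived from AUC as Gini = 2*AUC - 1, expressed here in percent.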
for sample in roc_auc:
if roc_auc[sample] is not None:
gini = round((roc_auc[sample]*2 - 1)*100, 2)
ginis.append(gini)
if verbose:
print ('Gini '+sample, gini)
return ginis
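# Added usage sketch (commentary, not from the original module), assuming a prepared Data
# object named `train` with .dataframe/.features/.target already set:
#   dt = DecisionTreeModel(max_depth=3)
#   dt.fit(train)
#   probs = dt.predict(train)                   # (n_samples, 2) array of class probabilities
#   ginis = dt.roc_curve(train, filename=None)  # list of Gini values, one per input sample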
#---------------------------------------------------------------
class DecisionTreeModel(ScoringModel):
'''
Decision tree classifier
'''
def __init__(self, **args):
self.model = DecisionTreeClassifier(**args)
self.features = []
def fit(self, data):
if data.weights != None:
self.model.fit(data.dataframe[data.features], data.dataframe[data.target], sample_weight = np.array(data.dataframe[data.weights]))
else:
self.model.fit(data.dataframe[data.features], data.dataframe[data.target])
#---------------------------------------------------------------
class LogisticRegressionModel(ScoringModel):
'''
Logistic Regression for scoring.
Contains LogisticRegressionClassifier, its coefficients and intercept, scores and scoring card.
An object of this class can select features, fit, edit coefficients, predict probabilities, calculate scores and transform a scorecard to SAS-code.
'''
def __init__(self, **args):
self.model = LogisticRegression(**args)
self.regularization = self.model.get_params()['penalty']
self.regularization_value = self.model.get_params()['C']
self.solver = self.model.get_params()['solver']
# Check the solver type, as it matters for models fitted on weighted samples
if self.model.solver != 'sag' and self.model.solver != 'newton-cg' and self.model.solver != 'lbfgs':
print ('Warning: this model does not support sample weights! For weighted scoring please use solver sag, newton-cg or lbfgs')
self.coefs = {}
self.features = []
self.scorecard = pd.DataFrame()
self.selected = []
#added 23.08.2018 by <NAME>
def inf_criterion(self, data, model=None, features=None, criterion='AIC', woe_transform=None):
'''
Calculation of information criterion (AIC/BIC) for given model on given data
Parameters
-----------
data: data for calculation
model: model with coefficients, that will be used to calculate information criterion
features: features to be used for information criterion calculation (in case model
was not fitted using its own selected features - model.selected)
criterion: type of information criterion to calculate
woe_transform: a woe object with binning information to perform WoE-transformation
Returns
-----------
value of information criterion
'''
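# Added commentary (not in the original): with k = number of kept coefficients plus the intercept
# and ll = the (optionally weighted) log-likelihood computed below,
#   AIC = 2*k - 2*ll
#   BIC = k*ln(n) - 2*ll, where n is the (weighted) number of observations.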
if features is None:
features_initial=self.selected.copy()
else:
features_initial=features.copy()
if model is None:
model_to_check=self.model
else:
model_to_check=model
if woe_transform is not None:
data=woe_transform.transform(data, keep_essential=True, original_values=True, calc_gini=False)
features_kept=[]
weights_crit=[model_to_check.intercept_[0]]
for i in range(len(features_initial)):
if model_to_check.coef_[0][i]!=0:
features_kept.append(features_initial[i])
weights_crit.append(model_to_check.coef_[0][i])
intercept_crit = np.ones((data.dataframe.shape[0], 1))
features_crit = np.hstack((intercept_crit, data.dataframe[features_kept]))
scores_crit = np.dot(features_crit, weights_crit)
if data.weights is not None:
ll = np.sum(data.dataframe[data.weights]*(data.dataframe[data.target]*scores_crit - np.log(np.exp(scores_crit) + 1)))
else:
ll = np.sum(data.dataframe[data.target]*scores_crit - np.log(np.exp(scores_crit) + 1))
if criterion in ['aic', 'AIC']:
return 2*len(weights_crit)-2*ll
elif criterion in ['bic', 'BIC', 'sic', 'SIC', 'sbic', 'SBIC']:
if data.weights is not None:
return len(weights_crit)*np.log(data.dataframe[data.weights].sum())-2*ll
else:
return len(weights_crit)*np.log(data.dataframe.shape[0])-2*ll
#added 23.08.2018 by <NAME>
def wald_test(self, data, model=None, features=None, woe_transform=None, out=None, sep=';'):
'''
Calculation of Standard Errors (sqrt from diagonal of covariance matrix),
Wald Chi-Square (coefficient divided by SE and squared) and p-values for coefficients
of given model on given data
Parameters
-----------
data: data for statistics calculation
model: model with coefficients, that will be used to calculate statistics
features: features to be used for statistics calculation (in case model
was not fitted using its own selected features - model.selected)
woe_transform: a woe object with binning information to perform WoE-transformation
out: a path for csv/xlsx output file to export
sep: the separator to be used in case of csv export
Returns
-----------
a dataframe with standard errors, wald statistics and p-values for feature coefficients
'''
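# Added commentary (not in the original): the code below builds a design matrix X with a leading
# intercept column, forms V = diag(p_i*(1 - p_i)) from the predicted probabilities (scaled by the
# sample weights, if any), takes cov = (X'VX)^-1, and for each coefficient b with standard error
# se = sqrt(diag(cov)) reports Wald = (b/se)^2 and p-value = P(chi2_1 >= Wald).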
if features is not None:
features_initial=features.copy()
else:
features_initial=self.features.copy()
if model is None:
model_to_check=self.model
else:
model_to_check=model
features_to_check=[]
coefs_list=[model_to_check.intercept_[0]]
for i in range(len(features_initial)):
if model_to_check.coef_[0][i]!=0:
features_to_check.append(features_initial[i])
coefs_list.append(model_to_check.coef_[0][i])
if woe_transform is not None:
data=woe_transform.transform(data, keep_essential=True, original_values=True, calc_gini=False)
# Calculate matrix of predicted class probabilities.
# Check resLogit.classes_ to make sure that sklearn ordered your classes as expected
predProbs = np.matrix(model_to_check.predict_proba(data.dataframe[features_initial]))
# Design matrix -- add column of 1's at the beginning of your X_train matrix
X_design = np.hstack((np.ones(shape = (data.dataframe[features_to_check].shape[0],1)),
data.dataframe[features_to_check]))
# Initiate matrix of 0's, fill diagonal with each predicted observation's variance
#not enough memory for big df
#V = np.matrix(np.zeros(shape = (X_design.shape[0], X_design.shape[0])))
#np.fill_diagonal(V, np.multiply(predProbs[:,0], predProbs[:,1]).A1)
if data.weights is not None:
V=np.multiply(np.matrix(data.dataframe[data.weights]).T, np.multiply(predProbs[:,0], predProbs[:,1])).A1
else:
V=np.multiply(predProbs[:,0], predProbs[:,1]).A1
# Covariance matrix
covLogit = np.linalg.inv(np.matrix(X_design.T * V) * X_design)
# Output
bse=np.sqrt(np.diag(covLogit))
wald=(coefs_list / bse) ** 2
pvalue=chi2.sf(wald, 1)
features_test=pd.DataFrame({'feature':['intercept']+[x for x in features_initial],
'coefficient':model_to_check.intercept_.tolist()+model_to_check.coef_[0].tolist()}).merge(pd.DataFrame({'feature':['intercept']+[x for x in features_to_check], 'se':bse,
'wald':wald,
'p-value':pvalue}),
on='feature',
how='left')
if out is not None:
if out[-4:]=='.csv':
features_test[['feature', 'coefficient', 'se', 'wald', 'p-value']].to_csv(out, sep = sep, index=False)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
features_test[['feature', 'coefficient', 'se', 'wald', 'p-value']].to_excel(out, sheet_name='Missing', index=False)
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
return features_test[['feature', 'coefficient', 'se', 'wald', 'p-value']]
def regularized_feature_selection(self, data, regularization=None, regularization_value=None, features=None, solver = None,
features_to_leave = None, scoring = 'roc_auc', threshold = .05):
'''
Feature selection based on regularization: the model is fitted on all available features, then features with
positive or statistically insignificant coefficients are excluded (l1 regularization is advised for a stronger preliminary exclusion)
Parameters
-----------
data: data for modeling (type Data)
regularization: 'l1' for LASSO (fewer features retained) or 'l2' for ridge regression (smaller coefficients)
regularization_value: effect of regularization will be more prominent for lesser regularization value
features: list of features to use for feature selection (if empty, then all features from data will be used)
features_to_leave: features that must be included in the model
scoring: type of score used to estimate the model quality
threshold: threshold for p-value when removing a feature
Returns
-----------
score for the model built on selected features and
list of selected features
'''
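# Added commentary (not in the original): the refit loop below first drops, one at a time, any
# feature outside features_to_leave whose coefficient is positive (lowest-Gini feature first);
# once no positive coefficients remain, it drops the feature with the largest Wald p-value above
# `threshold`, refitting after every removal, and finally discards any feature whose coefficient
# was shrunk to exactly zero by the regularization.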
if features_to_leave is None:
features_to_leave=[]
if features is None:
features_to_check=data.features.copy()
else:
features_to_check=features.copy()
if regularization is None:
regularization=self.regularization
if regularization_value is None:
regularization_value=self.regularization_value
if solver is None:
solver = self.solver
# correctness check
for feature in features_to_leave:
if feature not in data.features:
print ('Feature is not available:', feature)
return None
if data.weights is None:
lr=LogisticRegression(solver=solver, penalty=regularization, C=regularization_value)
else:
lr=LogisticRegression(solver='sag', penalty=regularization, C=regularization_value)
if data.ginis is None or data.ginis == {}:
ginis=data.calc_gini()
else:
ginis=data.ginis
scores=[]
to_refit=True
while to_refit:
to_refit=False
            if data.weights is None:
lr.fit(data.dataframe[features_to_check], data.dataframe[data.target])
else:
lr.fit(data.dataframe[features_to_check], data.dataframe[data.target],
sample_weight = data.dataframe[data.weights])
new_score = self.get_cv_score(Data(data.dataframe, target = data.target, features = features_to_check,
weights = data.weights),
scoring = scoring, selected_features=False)
scores.append(new_score)
positive_to_exclude=[x for x in np.asarray(features_to_check)[lr.coef_[0]>0] if x not in features_to_leave]
if len(positive_to_exclude)>0:
to_refit=True
features_to_exclude={x:ginis[x] for x in positive_to_exclude}
to_exclude=min(features_to_exclude, key=features_to_exclude.get)
print('Dropping ', to_exclude, 'with positive coefficient and gini =', ginis[to_exclude])
features_to_check.remove(to_exclude)
else:
wald=self.wald_test(data, model=lr, features=features_to_check)
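                # drop the feature with the largest p-value above the threshold,
                # never touching features_to_leave or the intercept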
feature_to_exclude_array=wald[(wald['p-value']>threshold) & (wald['p-value']==wald['p-value'].max()) & (wald['feature'].isin(features_to_leave+['intercept'])==False)]['feature'].values
if len(feature_to_exclude_array)>0:
to_refit=True
print('Dropping ', feature_to_exclude_array[0], 'with p-value =', wald[wald['feature']==feature_to_exclude_array[0]]['p-value'].values[0], 'and gini =', ginis[feature_to_exclude_array[0]])
features_to_check.remove(feature_to_exclude_array[0])
result_features=[]
for i in range(len(lr.coef_[0])):
if lr.coef_[0][i]==0:
print('Dropping ', features_to_check[i], 'with zero coefficient (gini =', ginis[features_to_check[i]], ')')
else:
result_features.append(features_to_check[i])
plt.plot(np.arange(len(scores)), scores, 'bo-', linewidth=2.0)
plt.xticks(np.arange(len(scores)), ['step ' + str(i) for i in np.arange(len(scores))], rotation = 'vertical')
plt.ylabel(scoring)
plt.title('Score changes')
plt.show()
self.selected = result_features
return new_score, self.selected
def stepwise_feature_selection(self, data, kind = 'mixed', features=None, features_initial=None, features_to_leave = None,
eps = .0005, scoring = 'roc_auc', forward_threshold = .05, backward_threshold = .05,
regularization=None, regularization_value=None):
'''
        Stepwise feature selection can be of 3 types: forward, backward, mixed.
        Forward: on each step the feature that increases the score most is added, as long as the score
        gain is greater than eps.
        Backward: starts from all available features; on each step the feature whose removal decreases
        the score least is dropped, as long as the score change is greater than eps (eps should be set
        to a small negative value for this mode).
        Mixed: each step contains 2 stages. Stage 1: the algorithm adds the significant candidate
        feature that increases the score most. Stage 2: the least significant feature currently in the
        model is removed.
Parameters
-----------
data: data for modeling (type Data)
kind: type of the algorithm, can be 'forward', 'backward' or 'mixed'
features: list of features from which selection is working (if None, then data.features are used)
features_initial: starting feature set for feature selection
features_to_leave: features that must be included in the model
eps: minimum significant score difference
scoring: type of score used to estimate the model quality
forward_threshold: threshold for p-value when adding a feature
backward_threshold: threshold for p-value when removing a feature
regularization: type of regularization to be used for wald test (l1 or l2)
regularization_value: value of regularization parameter to be used for wald test
Returns
-----------
score for the model built on selected features
list of selected features
'''
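        # Usage sketch (hypothetical names `model` and `train`):
        #   score, selected = model.stepwise_feature_selection(train, kind='mixed', eps=0.0005,
        #                                                      forward_threshold=0.05, backward_threshold=0.05)
        # alternates adding the best significant candidate and dropping the least significant selected feature.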
if features_to_leave is None:
features_to_leave=[]
to_leave = features_to_leave.copy()
#final_features = []
candidates = []
if features is None:
features=data.features.copy()
best_scores = []
features_change = []
# correctness check
for feature in features_to_leave:
if feature not in data.features:
                print ('Feature is not available:', feature)
return None
if features_initial is not None:
if feature not in features_initial:
print ('No', feature, 'in initial feature list provided! ')
return None
if regularization is None:
regularization=self.regularization
if regularization_value is None:
regularization_value=self.regularization_value
# Forward selection
if kind == 'forward':
print ('Forward feature selection started')
if features_initial is None:
features_initial=to_leave
for feature in features:
if feature not in features_initial:
candidates.append(feature)
features=features_initial.copy()
if len(features)>0:
prev_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = features, weights = data.weights),
scoring = scoring, selected_features=False)
best_scores.append(prev_score)
features_change.append('initial')
print('Initial features:', features, '', scoring, 'score', prev_score)
else:
prev_score=-1000
# maximum number of steps equals to the number of candidates
for i in range(len(candidates)):
# cross-validation scores for each of the remaining candidates of the step
cvs = {}
for feature in candidates:
tmp_features = features.copy()
# the feature is included in the model and the quality of the new model is calculated
tmp_features.append(feature)
try:
score = self.get_cv_score(Data(data.dataframe, target = data.target, features = tmp_features,
weights = data.weights),
scoring = scoring, selected_features=False)
                except Exception:
                    # skip candidates for which the CV score could not be computed
                    continue
cvs[feature] = score
# looking for the best new feature
for f, s in cvs.items():
# warning: the metric is maximized
if s == max(cvs.values()):
# if the difference between the old and the new scores is greater than eps, the feature is added and the next step follows
if s - prev_score > eps:
print ('To add:', f, '', scoring, 'score:', cvs[f])
prev_score = s
features.append(f)
candidates.remove(f)
best_scores.append(s)
features_change.append(f)
                    # if the difference between the old and the new scores is smaller than eps, the score dynamics are plotted and the method exits
else:
self.features = tmp_features
plt.plot(np.arange(len(features_change)), best_scores, 'bo-', linewidth=2.0)
plt.xticks(np.arange(len(features_change)), features_change, rotation = 'vertical')
plt.xlabel('Feature addition')
plt.ylabel(scoring)
plt.title('Stepwise score changes')
plt.show()
return prev_score, features
# if no features added
print('No features are available to add')
self.features = features
return prev_score, features
# Backward selection
elif kind == 'backward':
if features_initial is not None:
features=features_initial.copy()
for feature in features:
if feature not in to_leave:
candidates.append(feature)
print ('Backward selection started')
if len(features)>0:
prev_score = self.get_cv_score(Data(data.dataframe, target = data.target, features = features,
weights = data.weights), scoring = scoring, selected_features=False)
best_scores.append(prev_score)
features_change.append('initial')
print('Initial features:', features, '', scoring, 'score', prev_score)
else:
prev_score=-1000
#print('prev_score', prev_score, 'features', features, 'candidates', candidates)
# maximum number of steps equals to the number of candidates
for i in range(len(candidates)):
cvs = {}
if len(features)>1 and len(candidates)>0:
for feature in candidates:
tmp_features = features.copy()
# feature is removed and the cross-validation score is calculated
tmp_features.remove(feature)
cvs[feature] = self.get_cv_score(Data(data.dataframe, target = data.target, features = tmp_features,
weights = data.weights), scoring = scoring, selected_features=False)
else:
print('No features are available to exclude (at least 1 feature should remain)')
# searching for the feature that increases the quality most
features_=features.copy()
for f, s in cvs.items():
# if the difference between the old and the new scores is greater than eps, the feature is removed and the next step follows
if s == max(cvs.values()):
if s - prev_score > eps:
print ('To drop:', f, '', scoring, 'score:', cvs[f])
prev_score = s
candidates.remove(f)
features.remove(f)
best_scores.append(s)
features_change.append(f)
# if the quality increase is less than eps, exit
if features==features_ or len(candidates)==0:
if len(features)>1 and len(candidates):
                        print('Excluding any remaining feature causes too significant a score decrease')
self.features = candidates + to_leave
plt.plot(np.arange(len(features_change)), best_scores, 'bo-', linewidth=2.0)
plt.xticks(np.arange(len(features_change)), features_change, rotation = 'vertical')
plt.xlabel('Features removed')
plt.ylabel(scoring)
plt.title('Stepwise score changes')
plt.show()
return prev_score, self.features
# if no feature was removed
return prev_score, features
# Mixed
elif kind == 'mixed':
print ('Mixed selection started')
if features_initial is None:
features_initial=to_leave
for feature in features:
if feature not in to_leave:
candidates.append(feature)
if data.weights is None:
lr=LogisticRegression(solver='saga', penalty=regularization, C=regularization_value)
else:
lr=LogisticRegression(solver='sag', penalty=regularization, C=regularization_value)
prev_score = -1000
result_features = features_initial.copy()
scores = []
feature_sets = []
if len(result_features)>0:
new_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = result_features, weights = data.weights),
scoring = scoring, selected_features=False)
scores.append(new_score)
feature_sets.append(set(result_features))
else:
new_score = 0
to_continue=True
while to_continue and len(candidates)> 0:
to_continue=False
prev_score = new_score
pvalues = {}
cvs = {}
for candidate in [x for x in candidates if (x in result_features)==False]:
# new feature addition and the model quality estimation
                if data.weights is None:
lr.fit(data.dataframe[result_features + [candidate]], data.dataframe[data.target])
else:
lr.fit(data.dataframe[result_features + [candidate]], data.dataframe[data.target],
sample_weight = data.dataframe[data.weights])
new_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = result_features + [candidate], weights = data.weights),
scoring = scoring, selected_features=False)
wald=self.wald_test(data, model=lr, features=result_features + [candidate])
pvalues[candidate] = wald[wald['feature']==candidate]['p-value'].values[0]
cvs[candidate] = new_score
# searching for a significant feature that gives the greatest score increase
result_features_=result_features.copy()
for feature in sorted(cvs, key = cvs.get, reverse = True):
if pvalues[feature] < forward_threshold and feature != 'intercept':
print ('To add:', feature, '', scoring, 'score:', cvs[feature], ' p-value', pvalues[feature])
result_features.append(feature)
break
if result_features==result_features_:
print('No significant features to add were found')
else:
if set(result_features) in feature_sets:
print('Feature selection entered loop: terminating feature selection')
break
elif cvs[feature]-prev_score>eps:
to_continue=True
scores.append(cvs[feature])
feature_sets.append(set(result_features))
#print('result_features', result_features)
# the least significant feature is removed
# if it is Step1 then no removal
if len(result_features)>1:
                    if data.weights is None:
lr.fit(data.dataframe[result_features], data.dataframe[data.target])
else:
lr.fit(data.dataframe[result_features], data.dataframe[data.target],
sample_weight = data.dataframe[data.weights])
wald=self.wald_test(data, model=lr, features=result_features)
wald_to_check=wald[wald['feature'].isin(to_leave+['intercept'])==False]
#display(wald_to_check)
if max(wald_to_check['p-value']) > backward_threshold:
to_delete = wald_to_check[wald_to_check['p-value']==wald_to_check['p-value'].max()]['feature'].values[0]
"""if feature == to_delete:
candidates.remove(feature)
prev_score = prev_score-eps-0.05"""
result_features.remove(to_delete)
new_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = result_features, weights = data.weights),
scoring = scoring, selected_features=False)
print ('To drop:', to_delete, '', scoring, 'score', new_score, 'p-value', wald_to_check[wald_to_check['feature']==to_delete]['p-value'].values[0])
if set(result_features) in feature_sets:
print('Feature selection entered loop: terminating feature selection')
break
else:
to_continue=True
scores.append(new_score)
feature_sets.append(set(result_features))
elif wald_to_check[wald_to_check['coefficient']==0].shape[0] > 0:
to_delete = wald_to_check[wald_to_check['coefficient']==0]['feature'].tolist()
"""if feature == to_delete:
candidates.remove(feature)
prev_score = prev_score-eps-0.05"""
print ('To drop:', to_delete, ' with zero coefficients (no score changes)')
result_features=[x for x in result_features if x not in to_delete]
if set(result_features) in feature_sets:
print('Feature selection entered loop: terminating feature selection')
break
else:
to_continue=True
new_score = self.get_cv_score(Data(data.dataframe, target = data.target,
features = result_features, weights = data.weights),
scoring = scoring, selected_features=False)
scores.append(new_score)
feature_sets.append(set(result_features))
plt.plot(np.arange(len(scores)), scores, 'bo-', linewidth=2.0)
plt.xticks(np.arange(len(scores)), ['step ' + str(i) for i in np.arange(len(scores))], rotation = 'vertical')
plt.ylabel(scoring)
plt.title('Stepwise score changes')
plt.show()
self.selected = sorted(list(feature_sets[-1]))
return new_score, self.selected
else:
print ('Incorrect kind of selection. Please use backward, forward or mixed. Good luck.')
return None
#edited 22.08.2018 by <NAME> - selected_features=True
def fit(self, data, selected_features = True):
'''
Fits the model to the data given on the selected features or on all.
Parameters
-----------
data: data (type Data) for fitting
selected_features: whether to fit on the features selected previously or not,
True - use selected features, False - use all features
'''
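        # Usage sketch (hypothetical names `model`, `train`): model.fit(train, selected_features=True)
        # fits self.model on the previously selected features (self.selected) and stores the fitted
        # coefficients in self.coefs, keyed by feature name.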
self.coefs = {}
if selected_features:
print('Using selected features: '+str(self.selected))
self.features = self.selected
else:
print('Using all available features: '+str(data.features))
self.features = data.features
        if self.features is None:
print ('No features, how can that happen? :(')
return None
try:
if data.weights is None:
self.model.fit(data.dataframe[self.features], data.dataframe[data.target])
else:
self.model.fit(data.dataframe[self.features], data.dataframe[data.target],
sample_weight = data.dataframe[data.weights])
except Exception:
print('Fit failed! Maybe there are missings in data?...')
return None
for i in range(len(self.features)):
self.coefs[self.features[i]] = self.model.coef_[0][i]
def final_exclude(self, input_data, excluded=None, apply_changes=False):
'''
Checks the effect of one feature exclusion (after exclusion all features from 'excluded' list) for each of
available features and prints initial gini values and difference after each feature exclusion. After exclusion
list is decided this method can be used to exclude decided features and fit current model, using the rest of them.
Parameters
-----------
input_data: A Data or DataSamples object for fitting and gini calculation
excluded: a list of features to exclude before exclusion cycle
apply_changes: if True then all features from 'excluded' list will be excluded and the model will be fitted
using the rest of the features
'''
if len(self.selected)==0:
print('No selected features to try exclusion. Abort.')
return
if excluded is None:
excluded=[]
if type(input_data)==DataSamples:
retrain_data=input_data.train
else:
retrain_data=input_data
new_selected=[x for x in self.selected if x not in excluded]
if apply_changes:
self.selected=new_selected
self.fit(retrain_data, selected_features=True)
self.draw_coefs()
return self.roc_curve(input_data, figsize=(10, 7))
else:
try_model=LogisticRegressionModel(random_state = 42, penalty = self.regularization, C = self.regularization_value, solver = self.solver)
try_model.selected=new_selected
try_model.fit(retrain_data, selected_features=True)
#try_model.draw_coefs()
ginis_excl={}
ginis_excl['initial']=try_model.roc_curve(input_data, verbose=False)
for excl in new_selected:
#try_model = LogisticRegressionModel(random_state = 42, penalty = self.regularization, C = self.regularization_value, solver = self.solver)
try_model.selected=[x for x in new_selected if x!=excl]
try_model.fit(retrain_data, selected_features=True)
#try_model.draw_coefs()
new_ginis=try_model.roc_curve(input_data, verbose=False)
ginis_excl[excl]=[new_ginis[0]-ginis_excl['initial'][0], new_ginis[1]-ginis_excl['initial'][1], new_ginis[2]-ginis_excl['initial'][2]]
ginis_excl_df=pd.DataFrame(ginis_excl).T
if type(input_data)==DataSamples:
cols=['Train']
if input_data.validate is not None:
cols.append('Validate')
if input_data.test is not None:
cols.append('Test')
ginis_excl_df.columns=cols
ginis_excl_df.sort_values('Test' if 'Test' in cols else 'Validate' if 'Validate' in cols else 'Train', ascending=False, inplace=True)
else:
ginis_excl_df.columns=['Data']
ginis_excl_df.sort_values('Data', ascending=False, inplace=True)
return ginis_excl_df
def bootstrap_gini(self, bs_base, samples, bootstrap_part=0.75, bootstrap_number=10, stratify=True, replace=True, seed=0,
woe_transform=None, crosses_transform=None, figsize=(15,10), bins=None):
'''
Calculates Gini in bootstrap samples (either provided or generated) and plots their distribution with
gini values from provided samples (Data, DataSamples or list of Data)
Parameters
-----------
        bs_base: a DataSamples object with bootstrap_base and bootstrap or a Data object to generate bootstrap samples from
samples: a DataSamples object with train/validate/test samples, a Data object or a list of Data objects to mark gini values on plot
bootstrap_part: the size of each bootstrap sample is defined as part of input data sample
bootstrap_number: number of generated bootstrap samples
        stratify: should bootstrapping be stratified by the data target
replace: is it acceptable to repeat rows from train dataframe for bootstrap samples
seed: value of random_state for dataframe.sample (each random_state is calculated as seed + number in bootstrap)
woe_transform: a WOE object to perform WoE-transformation before using model
bins: number of bins for the distribution plot (if None - use Freedman-Diaconis rule)
crosses_transform: a Crosses object to perform cross-transformation before using model
'''
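        # Gini is computed as 2*AUC - 1 (in percent); the plot marks mean +/- 2*std of the
        # bootstrap distribution as a rough confidence band around the sample gini values.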
if isinstance(bs_base, DataSamples):
if bs_base.bootstrap_base is None:
print('No bootstrap data provided in the input DataSamples object. Return none')
return None
else:
print('Using bootstrap data provided in the input DataSamples object..')
bootstrap=bs_base.bootstrap
bootstrap_base=bs_base.bootstrap_base
elif isinstance(bs_base, Data):
print('Generating bootstrap data from the input Data object..')
DS_gini=DataSamples()
DS_gini.bootstrap_split(bs_base, bootstrap_part=bootstrap_part, bootstrap_number=bootstrap_number, stratify=stratify, replace=replace, seed=seed)
bootstrap=DS_gini.bootstrap
bootstrap_base=DS_gini.bootstrap_base
else:
print('No bootstrap data was provided in the input. Return none')
return None
if isinstance(samples, DataSamples):
check_samples=[]
for sample in [samples.train, samples.validate, samples.test]:
if sample is not None:
check_samples.append(sample)
elif isinstance(samples, list):
check_samples=samples.copy()
elif isinstance(samples, Data):
check_samples=[samples]
else:
print('No samples data was provided in the input')
check_samples=[]
samples_gini={}
for i in range(len(check_samples)):
if check_samples[i].name is None:
current_sample=str(i)
else:
current_sample=check_samples[i].name
print('Calculating gini for', current_sample,'sample..')
if self.selected!=[x for x in self.selected if x in check_samples[i].dataframe]:
print('Not all features from the current model were found in the',current_sample,'sample..')
if woe_transform is None and crosses_transform is None:
                print('No WOE or Crosses object was provided. Returning None.')
return None
else:
if woe_transform is not None:
print('Starting woe-transformation..')
to_calc_gini=woe_transform.transform(check_samples[i],
features=[x[:-4] for x in self.selected if x[:-4] in woe_transform.feature_woes],
keep_essential=False if crosses_transform is not None else True, calc_gini=False)
if crosses_transform is not None:
print('Starting crosses-transformation..')
to_calc_gini=crosses_transform.transform(to_calc_gini if woe_transform is not None else check_samples[i],
keep_essential=True, calc_gini=False)
else:
to_calc_gini=Data(check_samples[i].dataframe[self.selected+[check_samples[i].target]], check_samples[i].target, features=self.selected)
preds = self.predict(to_calc_gini)[:,1]
fpr, tpr, _ = roc_curve(to_calc_gini.dataframe[to_calc_gini.target], preds)
samples_gini[current_sample] = (2*auc(fpr, tpr)-1)*100
if self.selected!=[x for x in self.selected if x in bootstrap_base.dataframe]:
print('Not all features from the current model were found in the bootstrap data..')
if woe_transform is None and crosses_transform is None:
                print('No WOE or Crosses object was provided. Returning None.')
return None
else:
if woe_transform is not None:
print('Starting woe-transformation..')
bootstrap_base=woe_transform.transform(bootstrap_base,
features=[x[:-4] for x in self.selected if x[:-4] in woe_transform.feature_woes],
keep_essential=False if crosses_transform is not None else True, calc_gini=False)
if crosses_transform is not None:
print('Starting crosses-transformation..')
bootstrap_base=crosses_transform.transform(bootstrap_base, keep_essential=True, calc_gini=False)
#bootstrap_base=woe_transform.transform(bootstrap_base, features=[x[:-4] for x in self.selected], keep_essential=True, calc_gini=False)
bootstrap_gini=[]
print('Calculating gini for bootstrap samples..')
for i in range(len(bootstrap)):
preds = self.predict(Data(bootstrap_base.dataframe.iloc[bootstrap[i]][self.selected], bootstrap_base.target, features=self.selected))[:,1]
fpr, tpr, _ = roc_curve(bootstrap_base.dataframe.iloc[bootstrap[i]][bootstrap_base.target], preds)
bootstrap_gini.append((2*auc(fpr, tpr)-1)*100)
plt.figure(figsize=figsize)
sns.distplot(bootstrap_gini, bins=bins)
palette = itertools.cycle(sns.color_palette())
for s in samples_gini:
plt.axvline(x=samples_gini[s], linestyle='--', color=next(palette), label=s)
plt.axvline(x=np.mean(bootstrap_gini)-2*np.std(bootstrap_gini), linestyle='-', color='red', alpha=0.5)
plt.text(np.mean(bootstrap_gini)-2*np.std(bootstrap_gini), 0, ' mean-2*std = '+str(round(np.mean(bootstrap_gini)-2*np.std(bootstrap_gini),4)),
horizontalalignment='right', verticalalignment='bottom', rotation=90, fontsize=12)
plt.axvline(x=np.mean(bootstrap_gini)+2*np.std(bootstrap_gini), linestyle='-', color='red', alpha=0.5)
plt.text(np.mean(bootstrap_gini)+2*np.std(bootstrap_gini), 0, ' mean+2*std = '+str(round(np.mean(bootstrap_gini)+2*np.std(bootstrap_gini),4)),
horizontalalignment='right', verticalalignment='bottom', rotation=90, fontsize=12)
plt.xlabel('Gini values in bootstrap')
plt.ylabel('Distribution')
plt.legend()
#plt.title(feature.feature, fontsize = 16)
#if out:
# plt.savefig(out_images+feature.feature+".png", dpi=100, bbox_inches='tight')
plt.show()
return samples_gini, bootstrap_gini
def drop_features(self, to_drop = None):
'''
deletes features from the model
Parameters
-----------
to_drop: a feature or a list of features that should be excluded
'''
if to_drop is None:
            print ('Please enter the features you want to exclude. Use the to_drop parameter and restart this method.')
return None
elif isinstance(to_drop, list):
print ('The features will be removed from the "selected features" list.')
for feature in to_drop:
if feature in self.selected:
self.selected.remove(feature)
print (feature, 'removed')
else:
print ('The feature will be removed from the "selected features" list.')
if to_drop in self.selected:
                self.selected.remove(to_drop)
print (to_drop, 'removed')
#edited 22.08.2018 by <NAME> - selected_features=True
def get_cv_score(self, data, cv = 5, scoring = 'roc_auc', selected_features = True):
'''
Calculates the model quality with cross-validation
Parameters
-----------
data: data for cross-validation score calculation
cv: number of folds
scoring: metric of quality
selected_features: whether to use selected features or not, True - use selected features, False - use all features
Returns
-----------
cross-validation score
'''
if selected_features:
features = self.selected
else:
features = data.features
        if features is None:
print ('No features, how can that happen? :(')
return None
        if data.weights is None:
return cross_val_score(self.model, data.dataframe[features],
data.dataframe[data.target], cv = cv, scoring = scoring).mean()
else:
return cross_val_score(self.model, data.dataframe[features], data.dataframe[data.target], cv = cv,
scoring = scoring, fit_params = {'sample_weight' : data.dataframe[data.weights]}).mean()
def form_scorecard(self, woe=None, crosses=None, out = None, sep=';', score_value=444, score_odds=10, double_odds=69):
'''
Makes a scorecard and exports it to a file.
Parameters
-----------
woe: a WOE object for scoring card
crosses: a Crosses object for scoring card
out: file to export the scorecard in csv/xlsx format
sep: the separator to be used in case of csv export
score_value: score value, used for scaling
score_odds: odds of score value, used for scaling
        double_odds: score increment that halves the odds, used for scaling
Returns:
----------
A scorecard (pandas.DataFrame)
'''
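        # Scaling sketch (as implemented below):
        #   multiplier = double_odds / ln(2)
        #   group score = -(WOE - WOE_shift) * coefficient * multiplier
        #   intercept score = -(intercept + ln(score_odds)) * multiplier + score_value + shift correction
        # so that a total score of score_value corresponds to odds of score_odds, and each additional
        # double_odds points halves the odds of the target event (for double_odds > 0).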
        # if no WOE or Crosses object is used then only the regression coefficients are included
if woe is None and crosses is None:
            print ('No WOE or Crosses object provided: exporting regression coefficients only')
scorecard = pd.DataFrame(columns = ['feature', 'coefficient'])
for feature in self.features:
tmp = pd.DataFrame([[feature, self.coefs[feature]]], columns = ['feature', 'coefficient'])
scorecard = scorecard.append(tmp, ignore_index=True)
scorecard = scorecard.append(pd.DataFrame([['intercept', self.model.intercept_[0]]], columns = ['feature',
'coefficient']),
ignore_index=True)
#scorecard.to_csv(fname, sep = ';')
#return scorecard
else:
scorecard = pd.DataFrame(columns = ['feature', 'categorical', 'group', 'values', 'missing', 'woe', 'coefficient',
'sample_part', 'ER'])
for feature in self.features:
if woe is not None and feature[:-4] in woe.feature_woes:
woes = woe.feature_woes[feature[:-4]].woes
missing_group=woe.feature_woes[feature[:-4]].missing_group
groups = woe.feature_woes[feature[:-4]].groups
categorical=woe.feature_woes[feature[:-4]].categorical
d=woe.feature_woes[feature[:-4]].data
if d.weights is None:
all_obs=d.dataframe.shape[0]
else:
all_obs=d.dataframe[d.weights].sum()
# searching for WOE for each interval of values
for group in [x for x in woes if woes[x] is not None]:
if d.weights is None:
obs=d.dataframe[d.dataframe[feature]==woes[group]].shape[0]
bad=d.dataframe[d.dataframe[feature]==woes[group]][d.target].sum()
else:
obs=d.dataframe[d.dataframe[feature]==woes[group]][d.weights].sum()
bad=d.dataframe[(d.dataframe[feature]==woes[group]) & (d.dataframe[d.target]==1)][d.weights].sum()
missing_in=(group==missing_group)*1
tmp = pd.DataFrame([[feature[:-4], categorical, group, groups[group], missing_in, woes[group], self.coefs[feature],
obs/all_obs, bad/obs]],
columns = ['feature', 'categorical', 'group', 'values', 'missing', 'woe', 'coefficient',
'sample_part', 'ER'])
scorecard = scorecard.append(tmp, ignore_index=True)
elif crosses is not None and int(feature[len(crosses.prefix):-4]) in crosses.decision_trees:
tree = crosses.decision_trees[int(feature[len(crosses.prefix):-4])].tree.dropna(how='all', axis=1)
leaves = tree[tree['leaf']]
for group in sorted(leaves['group'].unique().tolist()):
current_group=leaves[leaves['group']==group]
used_features=list(leaves.columns[:leaves.columns.get_loc('node')])
current_woe=current_group['group_woe'].unique()[0]
current_er=current_group['group_target'].unique()[0]/current_group['group_amount'].unique()[0]
current_sample_part=current_group['group_amount'].unique()[0]/leaves[['group', 'group_amount']].drop_duplicates()['group_amount'].sum()
current_values=[]
for _, row in current_group.iterrows():
used_features=[]
parent_node=row['parent_node']
while parent_node is not None:
used_features=[tree[tree['node']==parent_node]['split_feature'].values[0]]+used_features
parent_node=tree[tree['node']==parent_node]['parent_node'].values[0]
current_values.append({x:row[x] for x in used_features})
#current_values=[{x:row[x] for x in used_features if row[x] is not None} for _, row in current_group.iterrows()]
scorecard = scorecard.append({'feature':feature[:-4], 'categorical':np.nan, 'group':group,
'values': current_values, 'missing':0, 'woe':current_woe,
'coefficient':self.coefs[feature], 'sample_part':current_sample_part,
'ER':current_er}
, ignore_index=True)
else:
                print ('Feature', feature, 'not found in the provided WOE or Crosses objects. Skipping')
scorecard = scorecard.sort_values(by = ['feature', 'group'])
# bias addition
scorecard_intercept = pd.DataFrame([['intercept', np.nan, np.nan, np.nan, np.nan, np.nan, self.model.intercept_[0], np.nan, np.nan]],
columns = ['feature', 'categorical', 'group', 'values', 'missing', 'woe',
'coefficient', 'sample_part', 'ER'])
multiplier=double_odds/np.log(2)
if double_odds>0:
scorecard=scorecard.merge(scorecard[['feature', 'woe']].groupby('feature', as_index=False).min().rename(index=str, columns={"woe": "woe_shift"}), on='feature',how='left')
else:
scorecard=scorecard.merge(scorecard[['feature', 'woe']].groupby('feature', as_index=False).max().rename(index=str, columns={"woe": "woe_shift"}), on='feature',how='left')
scorecard['woe_shifted']=scorecard['woe']-scorecard['woe_shift']
scorecard['score']=-(scorecard['woe_shifted']*scorecard['coefficient']*multiplier)
for_intercept=scorecard[['coefficient', 'woe_shift']].drop_duplicates().copy()
for_intercept['woe_on_coef']=-for_intercept['coefficient']*for_intercept['woe_shift']*multiplier
scorecard_intercept['score']=-((scorecard_intercept['coefficient']+np.log(score_odds))*multiplier)+score_value+for_intercept['woe_on_coef'].sum()
scorecard_intercept.index=[-1]
scorecard=scorecard.append(scorecard_intercept).sort_index().reset_index(drop=True)[['feature', 'categorical', 'group',
'values', 'missing', 'woe',
'coefficient', 'score',
'sample_part', 'ER']]
#display(scorecard)
scorecard['score']=round(scorecard['score']).astype('int64')
scorecard['values']=scorecard['values'].astype(str)
# export to a file
if out is not None:
if out[-4:]=='.csv':
scorecard.to_csv(out, sep = sep, index=False)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
scorecard.to_excel(out, sheet_name='Missing', index=False)
else:
                print('Unknown format for export file. Use .csv, .xls or .xlsx. Skipping export.')
self.scorecard = scorecard
return scorecard
#edited 28.08.2018 by <NAME>
def score(self, data, features_to_leave=None, include_scores_in_features=False, unknown_score=0, verbose=True):
'''
Performs data scoring
Parameters
-----------
data: data of type Data
features_to_leave: list of fields to include in output dataframe
include_scores_in_features: should all scores be treated as features in output Data object (otherwise new features will be empty)
Returns
-----------
Data object, containing dataframe with initial features (+ features_to_leave), their scores and overall score
'''
if verbose:
print ('Scores calculation...')
if self.scorecard is None or self.scorecard.shape[0] == 0:
print ('No scorecard! Where is it?')
return None
if 'score' not in self.scorecard.columns:
print ('Please set scores: scorecard[score]')
return None
scorecard=self.scorecard.copy()
scorecard['values']=scorecard['values'].astype(str)
features_to_leave=[] if features_to_leave is None else features_to_leave.copy()
features_to_leave+=([data.target] if data.target is not None else [])+([data.weights] if data.weights is not None else [])
features_to_leave=list(set(features_to_leave))
trees_for_score=scorecard[scorecard.apply(lambda row: pd.isnull(row['categorical']) and row['feature']!='intercept', axis=1)]['feature'].unique().tolist()
features_for_score=[x for x in scorecard.feature.unique() if x!='intercept' and x not in trees_for_score]
all_features=features_for_score.copy()
scorecard.loc[scorecard.feature.isin(trees_for_score)==False, 'values']=\
scorecard.loc[scorecard.feature.isin(trees_for_score)==False, 'values'].apply(lambda x:
np.nan if x=='nan' else \
eval(x.replace('[nan]', '[np.nan]').replace('[nan,','[np.nan,').replace(', nan]',', np.nan]')\
.replace(', inf]',', np.inf]').replace('[-inf,','[-np.inf,')))
if len(trees_for_score)>0:
scorecard.loc[scorecard.feature.isin(trees_for_score), 'values']=\
scorecard.loc[scorecard.feature.isin(trees_for_score), 'values'].apply(lambda x:
eval(x.replace(': nan,',': np.nan,').replace(': nan}',': np.nan}')\
.replace('), nan)','), np.nan)').replace(', nan,',', np.nan,')\
.replace('[nan,','[np.nan,').replace(', nan]',', np.nan]').replace('[nan]', '[np.nan]')\
.replace(', inf)',', np.inf)').replace('(-inf,','(-np.inf,')))
all_features+=list(set([f for values in scorecard[scorecard.feature.isin(trees_for_score)]['values'] for node in values for f in node]))
all_features=list(set(all_features))
all_features=sorted(all_features)
try:
data_with_scores = data.dataframe[list(set(all_features+features_to_leave))].copy()
for feature in features_for_score:
if verbose:
print (feature)
bounds = list(scorecard[scorecard.feature == feature]['values'])
scores = list(scorecard[scorecard.feature == feature].score)
missing = list(scorecard[scorecard.feature == feature].missing)
categorical = list(scorecard[scorecard.feature == feature].categorical)[0]
if categorical==False:
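                    # interval (non-categorical) feature: map each group's score to the lower bound
                    # of its interval, then give every observation the score of the interval that
                    # contains its value (missing values get the score of the missing group, if any)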
bs = {}
missing_score=0
for i in range(len(bounds)):
if missing[i]==1:
missing_score=scores[i]
if isinstance(bounds[i],list):
bs[scores[i]]=bounds[i][0]
bs[np.inf]=np.inf
bs={x:bs[x] for x in sorted(bs, key=bs.get)}
data_with_scores[feature+'_scr']=data_with_scores[feature].apply(
lambda x: missing_score if pd.isnull(x) \
else list(bs.keys())[np.argmax([bs[list(bs.keys())[i]] <= x and bs[list(bs.keys())[i+1]] > x for i in range(len(bs.keys())-1)])])
else:
bs = {}
missing_score=0
for i in range(len(bounds)):
bs[scores[i]]=bounds[i]
for b in bs[scores[i]]:
                            if pd.isnull(b):
""" Parsing source data into simple tsv datasets.
    To parse Bgl3 and GB1, ENRICH2 MUST BE INSTALLED IN A SEPARATE CONDA ENVIRONMENT NAMED 'enrich2' """
from os.path import isfile, join
import collections
import numpy as np
import pandas as pd
import enrich2
import utils
def parse_avgfp():
""" create the gfp dataset from raw source data """
source_fn = "source_data/avgfp/amino_acid_genotypes_to_brightness.tsv"
out_fn = "data/avgfp/avgfp.tsv"
if isfile(out_fn):
print("err: parsed avgfp dataset already exists: {}".format(out_fn))
return
# load the source data
data = pd.read_csv(source_fn, sep="\t")
# remove the wild-type entry
data = data.loc[1:]
# create columns for variants, number of mutations, and score
variants = data["aaMutations"].apply(lambda x: ",".join([x[1:] for x in x.split(":")]))
num_mutations = variants.apply(lambda x: len(x.split(",")))
score = data["medianBrightness"]
# create the dataframe
cols = ["variant", "num_mutations", "score"]
data_dict = {"variant": variants.values, "num_mutations": num_mutations.values, "score": score.values}
df = pd.DataFrame(data_dict, columns=cols)
# now add a normalized score column - these scores have the wild-type score subtracted from them
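    # (the subtracted constant below is assumed to be the wild-type medianBrightness from the source data)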
df["score_wt_norm"] = df["score"].apply(lambda x: x - 3.7192121319)
df.to_csv(out_fn, sep="\t", index=False)
def filter_dataset(df, threshold):
""" filter out variants that do not meet the required threshold for number of reads """
df = df[(df["inp"] + df["sel"]) >= threshold]
return df
def parse_bgl3_variant_list(ml, col_name):
""" creates a dataframe from the given list of variants """
# filter wild-type counts out, add to dataframe at the end
ml_no_wt = []
wt = []
for variant in ml:
if variant.startswith("WTcount"):
wt.append(int(variant.split(",")[-1].strip()))
else:
ml_no_wt.append(variant)
count_dict = collections.Counter(ml_no_wt)
frame = pd.DataFrame(index=count_dict.keys(), data=count_dict.values())
frame.columns = [col_name]
# add wild-type counts back in to datafrae
frame.loc["_wt"] = sum(wt)
return frame
def get_bgl3_count_df(output_dir=None):
""" combines the inp and sel variant lists into a single dataframe with counts """
inp_fn = "source_data/bgl3/unlabeled_Bgl3_mutations.txt"
sel_fn = "source_data/bgl3/positive_Bgl3_mutations.txt"
cache_fn = "bgl3_raw_counts.tsv"
if output_dir is None or not isfile(join(output_dir, cache_fn)):
print("Computing bgl3 count df from raw counts")
inp_variant_list = utils.load_lines(inp_fn)
sel_variant_list = utils.load_lines(sel_fn)
df = pd.concat([parse_bgl3_variant_list(inp_variant_list, "inp"),
parse_bgl3_variant_list(sel_variant_list, "sel")], axis=1, sort=True).fillna(0)
if output_dir is not None:
df.to_csv(join(output_dir, cache_fn), sep="\t")
return df
print("Loading cached count df from file: {}".format(join(output_dir, cache_fn)))
return pd.read_csv(join(output_dir, cache_fn), sep="\t", index_col=0)
def parse_bgl3():
""" create the bgl3 dataset from raw source data """
out_dir = "data/bgl3"
out_fn = "bgl3.tsv"
if isfile(join(out_dir, out_fn)):
print("err: parsed bgl3 dataset already exists: {}".format(join(out_dir, out_fn)))
return
# creates a single dataframe with counts from the given mutations lists
df = get_bgl3_count_df(output_dir=out_dir)
# filter the variants based on count threshold
threshold = 5
df = filter_dataset(df, threshold=threshold)
enrich2.create_e2_dataset(df, output_dir=out_dir, output_fn=out_fn)
def get_gb1_count_df(output_dir=None):
""" creates a single dataframe with raw counts for all gb1 variants """
cache_fn = "gb1_raw_counts.tsv"
if output_dir is None or not isfile(join(output_dir, cache_fn)):
print("Computing gb1 count df from raw counts")
        single = pd.read_csv("source_data/gb1/single_mutants.csv")
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 16:13:07 2019
@author: ning
"""
import os
import pandas as pd
from glob import glob
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
from matplotlib import pyplot as plt
working_dir = '../results/raw counts'
pos = pd.read_csv(os.path.join(working_dir,'pos.csv'))
att = pd.read_csv(os.path.join(working_dir,'att.csv'))
fig,axes = plt.subplots(figsize=(15,20),nrows=2,sharey=True)
ax = axes[0]
hue_order = pd.unique(pos['N-1 --> N'])
hue_order.sort()
sns.barplot(x = 'x',
y = 'Probability',
hue = 'N-1 --> N',
hue_order = hue_order,
data = pos,
ax = ax,
)
ax.set(ylim=(0,1.),xlabel='',xticks=[],
title = "Exp.1")
ax = axes[1]
hue_order = pd.unique(att['N-1 --> N'])
#!/usr/bin/env python
# coding: utf-8
# In this notebook, I have done some Exploratory Data Analysis (EDA) on the data, and I used different classifier models to predict the quality of the wine.
# 1. Logistic Regression
# 2. KNeighborsClassifier
# 3. SVC
# 4. DecisionTree Classifier
# 5. RandomForest Classifier
# I also used cross-validation to optimize the model performance.
#
#
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../../../input/piyushgoyal443_red-wine-dataset/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("../../../input/piyushgoyal443_red-wine-dataset"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from matplotlib.ticker import FormatStrFormatter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report
from sklearn.tree import DecisionTreeClassifier ,export_graphviz
import graphviz
from IPython.display import Image # To plot decision tree.
from sklearn.externals.six import StringIO
# In[ ]:
df = pd.read_csv("../../../input/piyushgoyal443_red-wine-dataset/wineQualityReds.csv")
import numpy as np
import pytest
import pandas as pd
from pandas.util import testing as tm
pyreadstat = pytest.importorskip("pyreadstat")
def test_spss_labelled_num(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "labelled-num.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0])
expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
    tm.assert_frame_equal(df, expected)
import pandas as pd
import geopandas as gpd
import sys
sys.path.append("..")
from misc.utils import normalize
from models.spatial_tools.misc import rank_freq
def calculate_distances(gs1, gs2):
"""
Calculates the distance (ellipsoidal) between to GeoSeries
:param gs1: GeoSeries1
:param gs2: GeoSeries2
:return: The GeoSeries of distances
"""
return gs1.distance(gs2)
def commute_distances(trajectories_frame, quantity=2):
"""
Calculates the commuting distances for each user. Quantity regulates the number of locations to which the distance
is calculated.
:param trajectories_frame: TrajectoriesFrame class object
:param quantity: The number of locations to which the distance will be calculated
:return: The DataFrame of distances to the locations
"""
sig_frame = rank_freq(trajectories_frame, quantity=quantity)
indices = []
distances = {}
for n in range(quantity - 1):
for k in range(n + 1, quantity):
indices.append((n, k))
indices = sorted(indices, key=lambda x: x[1])
prev_k = 1
df_list = []
for n, k in indices:
if k != prev_k:
            distances[prev_k] = pd.concat(df_list, axis=1)
# -*- coding: utf-8 -*-
"""
Pre-modeling set-up
06/01/2019
<NAME>
"""
# Import necessary libraries for data preparation/EDA
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
from collections import defaultdict
import pickle
# SET-UP ----------------------------------------------------------------------
# Connect to PostgresDB and pull in datasets
engine = create_engine("postgresql://postgres:dfdk#418!@@172.16.17.32/postgres")
# Yahoo! Finance
yahoo=pd.read_sql_query('select * from stock_price', con=engine)
print("Yahoo! Finance features:")
print(yahoo.columns.tolist())
# SimFin Fundamentals
simfindaily=pd.read_sql_query('select * from daily_simfin', con=engine)
print("SimFin features:")
print(simfindaily.columns.tolist())
# Derived momentum features
momentum=pd.read_sql_query('select * from momentum_features', con=engine)
print("Derived features")
print(momentum.columns.tolist())
# S&P 500 index
snp = pd.read_sql_query('select * from spy_stock_price', con = engine)
print("S&P 500")
print(snp.columns.tolist())
# Some quick fixes on keys
simfindaily['date_of_transaction'] = simfindaily['date']
simfindaily.drop('date', axis=1, inplace=True)
yahoo['ticker'] = yahoo['Symbol']
yahoo.drop('Symbol', axis=1, inplace=True)
momentum['ticker'] = momentum['Symbol']
momentum['date_of_transaction'] = momentum['Date']
momentum.drop(['Symbol', 'Date', 'High', 'Low',
'Open', 'Close', 'Volume', 'AdjClose'],
axis=1, inplace=True)
snp['snp500_close'] = snp['Adj Close']
snp['snp500_open'] = snp['Open']
snp = snp[['date_of_transaction', 'snp500_close', 'snp500_open']]
# Merge
df = pd.merge(yahoo, momentum, on=['ticker', 'date_of_transaction'])
df = pd.merge(df, simfindaily, how='left', on=['ticker', 'date_of_transaction'])
df = df.sort_values(['ticker','date_of_transaction']).reset_index(drop = True)
df.head()
# Pull out the tickers
tickers = df['ticker'].unique().tolist()
# COMBINED DATA SET FEATURE ENGINEERING ---------------------------------------
# Replace some missing values
df['Pct_Change_Yearly'].fillna(value=df['Pct_Change_Monthly'], inplace=True)
df['Yearly_Return_Rank'].fillna(value=df['Monthly_Return_Rank'], inplace=True)
df['Momentum_Quality_Yearly'].fillna(value=df['Momentum_Quality_Monthly'], inplace=True)
df['Volatility'].fillna(df['Volatility'].mean(), inplace=True)
# Some normalization
df['Monthly_Return_Rank'] = df['Monthly_Return_Rank'] / 500
df['Yearly_Return_Rank'] = df['Yearly_Return_Rank'] / 500
df['RSI'] = df['RSI'] / 100
# Construct some aggregate financial ratios from the SimFin data
df['eps'] = df['net_income_y'] / df['common_outstanding_basic']
df['pe_ratio'] = df['AdjClose'] / df['eps']
df['debt_ratio'] = df['total_liabilities'] / df['total_equity']
df['debt_to_equity'] = df['total_liabilities'] / df['total_equity']
df['roa'] = df['net_income_y'] / df['total_assets']
# Construct some additional ticker-level returns features
df['open_l1'] = df.groupby('ticker')['Open'].shift(1)
df['open_l5'] = df.groupby('ticker')['Open'].shift(5)
df['open_l10'] = df.groupby('ticker')['Open'].shift(10)
df['return_prev1_open_raw'] = 100*(df['Open'] - df['open_l1']) / df['open_l1']
df['return_prev5_open_raw'] = 100*(df['Open'] - df['open_l5']) / df['open_l5']
df['return_prev10_open_raw'] = 100*(df['Open'] - df['open_l10']) / df['open_l10']
df['close_l1'] = df.groupby('ticker')['AdjClose'].shift(1)
df['close_l5'] = df.groupby('ticker')['AdjClose'].shift(5)
df['close_l10'] = df.groupby('ticker')['AdjClose'].shift(10)
df['return_prev1_close_raw'] = 100*(df['AdjClose'] - df['close_l1']) / df['close_l1']
df['return_prev5_close_raw'] = 100*(df['AdjClose'] - df['close_l5']) / df['close_l5']
df['return_prev10_close_raw'] = 100*(df['AdjClose'] - df['close_l10']) / df['close_l10']
# Compute market betas
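# beta is estimated per ticker as Cov(market, asset) / Var(market) over an expanding window
# of the price series; the first 21 rows are left as NaN to guarantee a minimal estimation window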
betas = np.empty(df.shape[0])
for t in tickers:
idx = df['ticker'].loc[df['ticker'] == t].index.tolist()
x_t = df[['date_of_transaction', 'AdjClose']].iloc[idx]
x_t = pd.merge(x_t, snp, on='date_of_transaction').sort_values('date_of_transaction')
market_return = np.array(x_t['snp500_close'].tolist())
asset_return = np.array(x_t['AdjClose'].tolist())
beta_vector = np.empty(len(asset_return)) * np.nan
i = 21
while i < len(beta_vector):
beta_vector[i] = (np.cov(market_return[:(i-1)],
asset_return[:(i-1)])[0,1] /
np.var(market_return[:(i-1)]))
i += 1
betas[idx] = beta_vector
df['beta'] = betas
# Features to smooth
to_smooth = ['High', 'Low', 'Open', 'Close', 'Volume', 'AdjClose', 'Pct_Change_Daily',
'Pct_Change_Monthly', 'Pct_Change_Yearly', 'RSI', 'Volatility',
'Yearly_Return_Rank', 'Monthly_Return_Rank', 'Pct_Change_Class',
'Rolling_Yearly_Mean_Positive_Days', 'Rolling_Monthly_Mean_Positive_Days',
'Rolling_Monthly_Mean_Price', 'Rolling_Yearly_Mean_Price',
'open_l1', 'open_l5', 'open_l10', 'close_l1', 'close_l5', 'close_l10',
'return_prev1_open_raw', 'return_prev5_open_raw', 'return_prev10_open_raw',
'return_prev1_close_raw', 'return_prev5_close_raw', 'return_prev10_close_raw',
'pe_ratio', 'debt_ratio', 'debt_to_equity', 'roa', 'Momentum_Quality_Monthly',
'Momentum_Quality_Yearly', 'SPY_Trailing_Month_Return'
]
# Create smoothed variants of specified features
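# Exponential smoothing: ema_t = gamma * x_t + (1 - gamma) * ema_{t-1} with gamma = 0.1,
# applied independently within each ticker (the EMA is initialized at 0)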
for feature in to_smooth:
print("Smoothing '{}'".format(feature))
x_to_smooth = np.array(df[feature].tolist())
col = feature + "_smoothed"
for t in tickers:
idx = df['ticker'].loc[df['ticker'] == t].index.tolist()
x_t = np.array(x_to_smooth[idx].tolist())
# Compute EMA smoothing of target within ticker
ema = 0
gamma_ = 0.1
for t_i in range(len(x_t)):
ema = gamma_*x_t[t_i] + (1-gamma_)*ema
x_t[t_i] = ema
x_to_smooth[idx] = x_t
df[col] = x_to_smooth
# Hash the ticker to create a categorical feature
from sklearn.feature_extraction import FeatureHasher
h = FeatureHasher(n_features = len(tickers), input_type = 'string')
f = h.transform(df['ticker'])
ticker_features = f.toarray()
# Remove the quarter of pre-SimFin data
train = df[df['date_of_transaction'] >= '2011-03-31'].reset_index(drop=True)
# At the ticker level, lead the AdjClose column by n-trading days
target_gen = train[['ticker', 'date_of_transaction', 'AdjClose',
'Monthly_Return_Rank', 'SPY_Trailing_Month_Return',
'Pct_Change_Monthly', 'beta']]
target_gen = pd.merge(target_gen, snp, on='date_of_transaction')
from __future__ import print_function, division, absolute_import
import collections
import functools as ft
import json
import operator as op
import os.path
import re
import pandas as pd
from pandas.core.dtypes.api import is_scalar
def escape_parameters(params):
if isinstance(params, dict):
return {k: escape(v) for k, v in params.items()}
elif isinstance(params, tuple):
return tuple(escape(v) for v in params)
else:
raise NotImplementedError('cannot escape parameters of type %s' % type(params))
def escape(val):
if val is None:
return 'null'
elif isinstance(val, str):
return "'" + val.replace("'", "''") + "'"
elif isinstance(val, (int, bool, float)):
return json.dumps(val)
else:
raise NotImplementedError()
def like(s, pattern):
"""Execute a SQL ``like`` expression against a str-series."""
pattern = re.escape(pattern)
    # re.escape no longer escapes '%' and '_' on newer Python versions, so handle both escaped and bare forms
    pattern = pattern.replace(r'\%', '.*').replace('%', '.*')
    pattern = pattern.replace(r'\_', '.').replace('_', '.')
pattern = '^' + pattern + '$'
    # sqlite is case insensitive, is this always the case?
if is_scalar(s):
return re.match(pattern, s) is not None
else:
return s.str.contains(pattern)
def not_like(s, pattern):
"""Execute a SQL ``not like`` expression against a str-series."""
res = like(s, pattern)
if is_scalar(s):
return not res
else:
# handle inversion with missing numbers
return (1 - res).astype(res.dtype)
def trim(what, characters, s):
s = _str_funcs(s)
if what == 'leading':
return s.lstrip(characters)
elif what == 'trailing':
return s.rstrip(characters)
elif what == 'both':
return s.strip(characters)
raise ValueError('unknown trim mode %s' % what)
def position(needle, haystack):
return _str_funcs(haystack).find(needle) + 1
def upper(s):
return _str_funcs(s).upper()
def lower(s):
return _str_funcs(s).lower()
def concat(head, *tail):
strings = [head] + list(tail)
strings = [_fillna(s, '') for s in strings]
return ft.reduce(op.add, strings)
def _str_funcs(s):
    return s if is_scalar(s) else pd.Series(s).str
import pandas as pd
import urllib.request
import numpy as np
import shapefile
from datetime import datetime
from zipfile import ZipFile
import pandasql as ps
import requests
import json
import pkg_resources
def softmax(x):
if np.max(x) > 1:
e_x = np.exp(x/np.max(x))
else:
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
## getProvinceBoundaryBox function is to get the cordinate details from Mapbox API for ITALY
## Parameter Needed - Province Name
def getProvinceBoundaryBox(provinceName):
Place_Details = requests.get(
'http://api.mapbox.com/geocoding/v5/mapbox.places/' + provinceName + '%20province%20Italy.json?access_token=<KEY>').json()[
'features']
for eachPlace in Place_Details:
try:
if eachPlace['context'][0]['text'] == 'Italy' or eachPlace['context'][1]['text'] == 'Italy':
getBbox = eachPlace['bbox']
except:
continue
return getBbox
# The below function used to get the USA Patient Data Automatically from HARVARD DATABASE COVID Patient Database and will create a timeseries patient file along with population of the Area at county along with a USA County file
## Parameter Needed - Target Directory to save the File
def fetch_us_patientdata(tgtdir):
url='https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/HIDLTK/7NWUDK'
urllib.request.urlretrieve(url,tgtdir+'/us_county_confirmed_cases.tab')
latest_data = pd.read_csv(tgtdir+'/us_county_confirmed_cases.tab',sep='\t')
allcols = list(latest_data.columns)
datecols = allcols[allcols.index('HHD10')+1:]
latest_data = latest_data[['COUNTY', 'NAME']+datecols]
datecolsmod=[datetime.strptime(i,'%m/%d/%Y').strftime('%Y%m%d') for i in datecols]
latest_data.columns = ['cfips', 'county']+datecolsmod
latest_data = latest_data.melt(id_vars=['cfips', 'county'], var_name='data_date', value_name='no_pat')
latest_data['county']=latest_data['county'].apply(lambda x : x.split(' County')[0])
url='https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/HIDLTK/OFVFPY'
urllib.request.urlretrieve(url,tgtdir+'/COUNTY_MAP.zip')
zip = ZipFile(tgtdir+'/COUNTY_MAP.zip')
zip.extractall(tgtdir)
sf = shapefile.Reader(tgtdir+"/CO_CARTO")
shape_df = pd.DataFrame()
shapes = sf.shapes()
records = sf.records()
for eachrec in range(len(records)):
eachRec = {}
shapebbbox = shapes[eachrec].bbox
shapelat = (shapebbbox[1] + shapebbbox[3]) / 2
shapelong = (shapebbbox[0] + shapebbbox[2]) / 2
eachRec['lat'] = [shapelat]
eachRec['long'] = [shapelong]
eachRec['county_fips'] = [records[eachrec][0]]
eachRec['county_name'] = [records[eachrec][1]]
eachRec['POP'] = [records[eachrec][10]]
eachRec['HHD'] = [records[eachrec][11]]
shape_df = shape_df.append(pd.DataFrame.from_dict(eachRec))
us_counties = shape_df
us_counties['county_name'] = us_counties['county_name'].apply(lambda x: x.split(' County')[0])
us_counties['county_fips'] = us_counties['county_fips'].apply(lambda x: int(x))
us_counties.columns = ['lat','long', 'cfips', 'county', 'pop', 'HHD']
full_data = pd.merge(latest_data, us_counties, on=['cfips', 'county'])
if sum(full_data['no_pat']) != sum(latest_data['no_pat']):
print("fetch failed")
raise
full_data['no_pat'] = full_data.groupby(['cfips'])['no_pat'].apply(lambda x: x.cummax())
full_data['new_pat'] = full_data.groupby(['lat','long'])['no_pat'].diff()
full_data = full_data.dropna()
us_counties.to_csv(tgtdir+'USA_counties.csv',index=False)
full_data.to_csv(tgtdir+'USA_covid_data_final.csv',index=False)
print(' USA Patient Data Created under Directory :'+tgtdir)
## Below function will create the China COVID19 time series Patient file by abosrving data from Harvard Database and it will create County file along with Population Data by county/province
## Parameter Needed - Target Directory to save the File
def fetch_china_patientdata(tgtdir):
url = 'https://dataverse.harvard.edu/api/access/datafile/3781338?format=original&gbrecs=true'
urllib.request.urlretrieve(url, tgtdir+'/City_Confirmed_Map_China.csv')
    latest_data = pd.read_csv(tgtdir+'/City_Confirmed_Map_China.csv')
"""Project agnostic utility functions."""
import io
import os
import re
import csv
import sys
import hashlib
import logging
import contextlib
import logging.config
from pathlib import Path
from copy import deepcopy
from numbers import Number
from functools import wraps
from datetime import date, datetime
from collections import OrderedDict, deque
from typing import Dict, List, Tuple, Union
import jinja2
import numpy as np
import pandas as pd
from scipy.stats import iqr
from jinjasql import JinjaSql
from pandas.tseries import offsets
from IPython.display import display
import matplotlib.pyplot as plt # NOQA
logger = logging.getLogger(f'utils.{__name__}')
DEFAULT_JINJA_ENV_ARGS = dict(
autoescape=True, line_statement_prefix="%", trim_blocks=True, lstrip_blocks=True,
)
NULL_COUNT_CLAUSE = """SUM( CASE WHEN {col} IS NULL
THEN 1 ELSE 0 END ) AS {as_col}"""
def make_dirs(dir_path):
"""Add a return value to mkdir."""
Path.mkdir(Path(dir_path), exist_ok=True, parents=True)
return dir_path
def path_or_string(str_or_path):
"""Load file contents as string or return input str."""
file_path = Path(str_or_path)
try:
with file_path.open('r') as f:
return f.read()
except (OSError, ValueError):
return str_or_path
# PythonDecorators/decorator_function_with_arguments.py
def local_df_cache(
# pylint: disable=unused-argument
use_cache=False,
refresh_cache=False,
cache_fn='cache_file',
cache_dir='/tmp/', # nosec
cache_format='pickle',
cache_hit_msg='Reading cached data from file: ',
cache_miss_msg='Cache missing for file: ',
cache_put_msg='Saving data to file: ',
):
"""Decorate the target function to cache ouputed DataFrames.
This decorator receives and removes all cache related kwargs so that the
wrapped function doesn't get them nonetheless the caching behavior can
be changed on the fly.
It must be called as a function even when no arguments are passed:
@local_df_cache()
The decorated function must not have any params that MATCH the ones of this
decorator.
Parameters
----------
use_cache : bool
Whether to consult the cache or not.
refresh_cache : bool
Whether to update the cache or not (does not depend on use_cache).
cache_fn : str
Name of the cache file.
cache_dir : str
Directory where the cached files will be stored.
cache_format : str
Cache file format as supported by `df_to_multi`.
cache_hit_msg : str
Show this when cache hit.
cache_miss_msg : str
Show this when cache miss.
cache_put_msg : str
Show this when cache put.
IMPORTANT: By default the filename is used as the cache key, so it does
not consider the actual contents of the DataFrame being cached.
Take care to select a cache name that reflects changes in those contents,
such as including the hash of the query used to generate the stored data.
Warnings
--------
- If the decorated function returns a tuple, only the first dataframe contained in the tuple will be cached.
"""
# Scope jumping scheisse.
orig_cache_opts = locals()
def wrap(func):
"""Do dummy docstring."""
@wraps(func)
def wrapped_f(*args, **kwargs):
"""Do dummy docstring."""
cache_opts_arg = kwargs.pop('cache_opts').copy()
cache_opts = orig_cache_opts.copy()
cache_opts.update(cache_opts_arg)
use_cache = cache_opts['use_cache']
refresh_cache = cache_opts['refresh_cache']
cache_fn = cache_opts['cache_fn']
cache_dir = cache_opts['cache_dir']
cache_format = cache_opts['cache_format']
cache_hit_msg = cache_opts['cache_hit_msg']
cache_miss_msg = cache_opts['cache_miss_msg']
cache_put_msg = cache_opts['cache_put_msg']
# Clear cache args from kwargs
for k in orig_cache_opts:
if k in kwargs:
del kwargs[k]
if callable(cache_fn):
cache_fn = cache_fn(cache_opts, *args, **kwargs)
base_fn = f"{cache_fn}.{cache_format}"
cache_fn = os.path.join(cache_dir, base_fn)
if use_cache:
if os.access(cache_fn, os.R_OK):
logger.debug(f"{cache_hit_msg}{cache_fn}")
return df_read_multi(cache_fn)
else:
logger.debug(f"{cache_miss_msg}{cache_fn}")
rv = func(*args, **kwargs)
if use_cache:
if (not os.access(cache_fn, os.R_OK)) or refresh_cache:
logger.debug(f"{cache_put_msg}{cache_fn}")
# rv might be a tuple: in this case we only cache the
# first dataframe found in the tuple
if isinstance(rv, tuple):
for i in rv:
if isinstance(i, pd.DataFrame):
df_to_multi(i, cache_fn)
break
else:
df_to_multi(rv, cache_fn)
return rv
return wrapped_f
return wrap
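# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how `local_df_cache` is expected to be applied.
# `fetch_sales` and its query string are invented purely for illustration; the
# caller passes `cache_opts`, which the wrapper pops before the decorated
# function runs, so `fetch_sales` itself never sees the caching kwargs.
def _example_local_df_cache_usage():
    """Run a cached dummy query twice; the second call reads the pickle."""

    @local_df_cache()
    def fetch_sales(query):
        # A real implementation would hit a database; this just builds a frame.
        return pd.DataFrame({'query': [query], 'amount': [42.0]})

    cache_opts = {
        'use_cache': True,
        'refresh_cache': False,
        'cache_fn': 'sales_' + hash_str('SELECT 1'),
        'cache_dir': '/tmp/',  # nosec
        'cache_format': 'pickle',
    }
    first = fetch_sales('SELECT 1', cache_opts=cache_opts)
    second = fetch_sales('SELECT 1', cache_opts=cache_opts)  # cache hit
    return first, second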
def df_read_multi(fn, index_col=False, quoting=0):
"""Read multiple table disk-formats into a pandas DataFrame."""
ext = Path(fn).suffix[1:]
if ext == 'csv':
df = pd.read_csv(fn, index_col=index_col, quoting=quoting)
def clean_quotes(s):
"""Clean start and ending quotes."""
if s[0] in '"\'' and s[-1] in '"\'':
return s[1:-1]
return s
df.columns = list(map(clean_quotes, df.columns))
return df
elif ext == 'feather':
return pd.read_feather(fn)
elif ext in ['pickle', 'pkl']:
return pd.read_pickle(fn)
else:
raise ValueError(f"File format '{ext}' not supported!")
def df_to_multi(df, fn, index=False, quoting=csv.QUOTE_NONNUMERIC):
"""Convert a DF to multiple disk-formats table."""
ext = Path(fn).suffix[1:]
if ext == 'csv':
return df.to_csv(fn, index=index, quoting=quoting)
elif ext == 'feather':
return df.to_feather(fn)
elif ext in ['pickle', 'pkl']:
return df.to_pickle(fn)
else:
raise ValueError(f"File format '{ext}' not supported!")
def convert_to_snake_case(name: str):
"""Convert string to snake_case."""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def deque_to_geo_hierarchy_dict(double_linked_list: deque, target_level: str):
"""Converts a deque to an ordered dictionary using GEO ordered levels."""
orde = OrderedDict() # type: ignore # noqa
d = deepcopy(double_linked_list)
while len(d) > 0:
elem = d.popleft()
level = elem.pop('level')
orde[level] = elem
if target_level == level:
return orde
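# Hedged sketch with made-up geo levels: the deque is consumed from the left
# and the returned OrderedDict stops at (and includes) the requested level.
def _example_deque_to_geo_hierarchy_dict():
    levels = deque([
        {'level': 'country', 'id': 'AR'},
        {'level': 'province', 'id': 'B'},
        {'level': 'city', 'id': 'La Plata'},
    ])
    return deque_to_geo_hierarchy_dict(levels, target_level='province')
    # -> OrderedDict([('country', {'id': 'AR'}), ('province', {'id': 'B'})])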
def str_to_datetime(datetime_str):
"""Convert possible date-like string to datetime object."""
formats = (
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S.%f',
'%H:%M:%S.%f',
'%H:%M:%S',
'%Y%m%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S',
'%Y%m%d',
'%Y-%m-%dT%H',
'%Y%m',
)
for frmt in formats:
try:
return datetime.strptime(datetime_str, frmt)
except ValueError:
if frmt is formats[-1]:
raise
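# Small self-check sketch for str_to_datetime; the literal dates are examples
# only. Strings matching none of the formats re-raise the final ValueError.
def _example_str_to_datetime():
    assert str_to_datetime('2020-01-31') == datetime(2020, 1, 31)
    assert str_to_datetime('20200131T23:59:59') == datetime(2020, 1, 31, 23, 59, 59)
    try:
        str_to_datetime('not-a-date')
    except ValueError:
        pass  # expected: no format matched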
def range_datetime(datetime_start, datetime_end, timeskip=None):
"""Build datetime generator over successive time steps."""
if timeskip is None:
timeskip = offsets.Day(1)
while datetime_start <= datetime_end:
yield datetime_start
datetime_start += timeskip
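# Tiny demonstration (dates invented): with the default one-day offset the
# generator yields each calendar day between the bounds, inclusive.
def _example_range_datetime():
    start, end = datetime(2020, 1, 1), datetime(2020, 1, 3)
    return [d.strftime('%Y-%m-%d') for d in range_datetime(start, end)]
    # -> ['2020-01-01', '2020-01-02', '2020-01-03']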
def get_first_fortnight_last_day(ds):
"""Return the last day of the datestamp's fortnight for its month."""
first_bday = ds + offsets.MonthBegin(1) - offsets.BMonthBegin(1)
first_monday_second_fortnight = first_bday + offsets.BDay(10)
last_sunday_first_fortnight = first_monday_second_fortnight - offsets.Day(1)
return last_sunday_first_fortnight
def query_yes_no(question, default='no'):
"""Ask a yes/no question via input() and return their answer.
Parameters
----------
question : str
Question presented to the user.
default : str
Default answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
"""
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower() # nosec
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
def get_ordered_factor_levels(df, col, top_n=None, min_counts=None):
"""
Return a list of a column's levels and num of levels.
Ideally used only on factor (categorical-typed) cols or cols without a large amount
of values. Note that the levels list is ordered by descending popularity.
"""
rv = df[col].value_counts()
if min_counts:
rv = rv[rv >= min_counts]
if top_n:
rv = rv[:top_n]
return rv.index.values, len(rv)
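# Synthetic illustration: levels come back ordered by descending frequency,
# optionally truncated by top_n / min_counts. Column values are invented.
def _example_get_ordered_factor_levels():
    df = pd.DataFrame({'color': ['red', 'red', 'red', 'blue', 'blue', 'green']})
    levels, n_levels = get_ordered_factor_levels(df, 'color', top_n=2)
    # levels -> array(['red', 'blue'], dtype=object); n_levels -> 2
    return levels, n_levels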
def normalize_arr(arr):
"""Normalize a numpy array to sum 1."""
arr_sum = np.sum(arr, axis=0)
return 1.0 * arr / arr_sum if arr_sum != 0 else arr
def apply_time_bounds(df, sd, ed, ds_col):
"""Filter time dates in a datetime-type column or index.
Warnings
--------
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
if ds_col:
rv = df.query(f'{ds_col} >= @sd and {ds_col} <= @ed')
else:
rv = df.loc[sd:ed]
return rv
def normalize_ds_index(df, ds_col):
"""Normalize usage of ds_col as column in df.
Warnings
--------
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
if ds_col in df.columns:
return df
elif ds_col == df.index.name:
df = df.reset_index().rename(columns={'index': ds_col})
else:
raise ValueError(f"No column or index found as '{ds_col}'.")
return df
def standarize_values(values):
"""Standarize array values with MinMAx."""
assert np.issubdtype(values, np.number)
shifted_values = values - values.min()
# Degenerate case when values array has all same input values
if np.count_nonzero(shifted_values) == 0:
return values
return shifted_values / (shifted_values.max() - shifted_values.min())
def robust_standarize_values(values):
"""Standarize values with InterQuartile Range and median."""
assert np.issubdtype(values, np.number)
return (values - values.median()) / iqr(values)
def hash_str(s, length=8):
"""Hash a string."""
return hashlib.sha256(s.encode('utf8')).hexdigest()[:length]
def df_info_to_str(df):
"""Cast df info into string type."""
buffer = io.StringIO()
df.info(buf=buffer)
return buffer.getvalue()
class JinjaTemplateException(Exception):
"""Dummy doc."""
class BadInClauseException(JinjaTemplateException):
"""Dummy doc."""
def _format_value_in_clause(value: Union[Number, str]) -> str:
"""Format value according to type.
Args:
value (Union[Number, str]): any number or string
Raises:
BadInClauseException: for values other than Number or str
Returns:
str: formatted string
"""
if isinstance(value, str):
return f"'{value}'"
elif isinstance(value, Number):
return f"{value}"
else:
raise BadInClauseException(
f"Value type: {type(value)} is not allowed for in clause formatting"
)
def format_in_clause(
iterable: Union[Tuple[Union[Number, str]], List[Union[Number, str]]]
) -> str:
"""
Create a Jinja2 filter to format list-like values passed.
Args:
iterable (list, tuple): list / tuple of strings and numbers. Can be empty.
Raises:
BadInClauseException: for non iterable inputs.
Returns:
str: The formatted string of the list of elements.
Notes:
Idea originally from
https://github.com/hashedin/jinjasql/blob/master/jinjasql/core.py
Passing an empty tuple/list won't raise an exception, in order to
simplify the function. Also, regarding failing queries, there's no
explicit goal for sql formatting (although that's a common use case).
Examples:
>>> format_in_clause([1.12, 1, 'a'])
(1.12,1,'a')
"""
if not isinstance(iterable, (list, tuple)):
raise BadInClauseException(
f"Value passed is not a list or tuple: '{iterable}'. "
f"Where the query uses the '| inclause'."
)
values = [_format_value_in_clause(v) for v in iterable]
clause = ",".join(values)
clause = "(" + clause + ")"
return clause
def get_default_jinja_template(path_or_str, filters=None, **kwargs):
"""Create Jinja specific template.."""
if filters is None:
filters = {"inclause": format_in_clause}
# The following line is labeled with nosec so that bandit doesn't fail. In DEFAULT_JINJA_ENV_ARGS, autoescape is set to True.
environment = jinja2.Environment(**{**DEFAULT_JINJA_ENV_ARGS, **kwargs}) # nosec
environment.filters = {**environment.filters, **filters}
return environment.from_string(path_or_string(path_or_str))
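# Hedged sketch: render an inline SQL snippet through the default environment,
# exercising the `inclause` filter registered above. Table and column names
# are invented; numeric ids avoid any autoescape quoting concerns.
def _example_get_default_jinja_template():
    template = get_default_jinja_template(
        "SELECT * FROM events WHERE event_id IN {{ ids | inclause }}"
    )
    return template.render(ids=[1, 2, 3])
    # -> "SELECT * FROM events WHERE event_id IN (1,2,3)"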
def get_cloudera_sql_stats_aggr(
input_expression,
as_name=None,
with_minmax=False,
with_std=False,
with_ndv=False,
with_count=False,
ends_comma=True,
):
"""Get Cloudera-valid battery of statistical aggregations clause."""
rv_l = [
f'SUM({input_expression}) AS sum',
f'AVG({input_expression}) AS mean',
f'APPX_MEDIAN({input_expression}) AS median',
]
if with_minmax:
rv_l.append(f'MIN({input_expression}) AS min')
rv_l.append(f'MAX({input_expression}) AS max')
if with_std:
rv_l.append(f'STDDEV({input_expression}) AS std')
if with_ndv:
rv_l.append(f'NDV({input_expression}) AS unique')
if with_count:
rv_l.append(f'COUNT({input_expression}) AS count_rows')
rv = ',\n'.join([f'{i}_{as_name}' for i in rv_l]) + ','
if not ends_comma:
rv = rv[:-1]
return rv
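# Illustration only (the `revenue` expression is invented): every aggregate is
# aliased with the `as_name` suffix, and the trailing comma is dropped here.
def _example_get_cloudera_sql_stats_aggr():
    return get_cloudera_sql_stats_aggr(
        'revenue', as_name='revenue', with_minmax=True, ends_comma=False
    )
    # 'SUM(revenue) AS sum_revenue,\nAVG(revenue) AS mean_revenue,\n
    #  APPX_MEDIAN(revenue) AS median_revenue,\nMIN(revenue) AS min_revenue,\n
    #  MAX(revenue) AS max_revenue'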
def get_cloudera_sample_cut(sample_lines_ratio=None):
"""Get cut int value for sample proportion."""
if sample_lines_ratio is None:
sample_lines_ratio = 1.0
# Generate the value for sample selection.
sampling_cut = int(2 ** 64 * sample_lines_ratio / 2.0) - 1
return sampling_cut
def get_cloudera_hashed_sample_clause(col_or_exp, sample_pct):
"""
Get Cloudera-valid clause for hashed-sampling.
This will work on an id col or on a given expression that outputs
a valid column. It takes a sample_pct number between 0 and 1.
"""
assert 0 < sample_pct < 1, f'{sample_pct} should be a float in (0,1)'
threshold_int = get_cloudera_sample_cut(sample_lines_ratio=sample_pct)
rv = f'AND abs(fnv_hash(CAST({col_or_exp} AS bigint))) <= {threshold_int}'
return rv
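# Sketch with an assumed `user_id` column: a 1% deterministic sample clause.
# Because fnv_hash is stable, re-running the query keeps the same users.
def _example_get_cloudera_hashed_sample_clause():
    return get_cloudera_hashed_sample_clause('user_id', sample_pct=0.01)
    # -> 'AND abs(fnv_hash(CAST(user_id AS bigint))) <= <threshold_int>'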
def str_normalize_pandas(data, str_replace_kws=None):
"""Normalize all string-like data in pandas objects.
Parameters
----------
data (pd.DataFrame, pd.Series): containing the data. Might or not have string
columns
str_replace_kws (dict): contains pandas str.replace method kwargs
Warnings
--------
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
if isinstance(data, pd.DataFrame):
obj_cols = data.select_dtypes(include=[np.object]).columns
for col in obj_cols:
data[col] = (
data[col]
.str.lower()
.str.normalize('NFKD')
.str.encode('ascii', errors='ignore')
.str.decode('utf-8')
)
if str_replace_kws:
data[col] = data[col].str.replace(**str_replace_kws)
return data
elif isinstance(data, pd.Series) and data.dtype == np.object:
data = (
data.str.lower()
.str.lower()
.str.normalize('NFKD')
.str.encode('ascii', errors='ignore')
.str.decode('utf-8')
)
if str_replace_kws:
data = data.str.replace(**str_replace_kws)
return data
else:
raise TypeError(f"File format '{type(data)}' not supported!")
def df_optimize_float_types(
df, type_mappings: Dict[str, str] = None,
):
"""Cast dataframe columns to more memory friendly types.
Parameters
----------
df: DataFrame to be modified.
type_mappings: Mapping of types. Defaults to {"float64":"float16", "float32":"float16"}
Warnings
--------
- Type conversion leads to a loss in accuracy and possible overflow of the target type.
Eg:
>>> n = 2**128
>>> np.float64(n), np.float32(n)
(3.402823669209385e+38, inf)
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
if type_mappings is None:
type_mappings = {
"float64": "float16",
"float32": "float16",
}
new_dtypes = {c: type_mappings.get(t.name, t) for c, t in df.dtypes.iteritems()}
df = df.astype(new_dtypes, copy=False)
return df
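# Minimal demonstration with synthetic data: float64 columns are downcast to
# float16 per the default mapping, while the integer column is left untouched.
def _example_df_optimize_float_types():
    df = pd.DataFrame({'price': [1.5, 2.5], 'units': [3, 4]})
    return df_optimize_float_types(df).dtypes
    # price    float16
    # units      int64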
def df_replace_empty_strs_null(df):
"""Replace whitespace or empty strs with nan values.
Warnings
--------
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
str_cols = df.select_dtypes(include='object').columns.tolist()
if str_cols:
logger.debug(f'Replacing whitespace in these object cols: {str_cols}...')
for col in str_cols:
df[col].replace(r'^\s*$', np.nan, regex=True, inplace=True)
return df
def df_drop_nulls(df, max_null_prop=0.2, protected_cols: List[str] = None):
"""Drop null columns in df, for null share over a certain threshold.
Warnings
--------
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
# Note: Pandas treats string columns as `object` data types.
if protected_cols is None:
protected_cols = list()
logger.debug(
f'Dropping columns with null ratio greater than {max_null_prop:.2%}...'
)
df = df_replace_empty_strs_null(df)
null_means = df.isnull().mean()
null_mask = null_means < max_null_prop
null_mask[[c for c in df.columns if c in protected_cols]] = True
drop_cols = null_mask[~null_mask].index.tolist()
logger.debug(
f'Null proportions:\n'
f'{null_means.loc[drop_cols].sort_values(ascending=False)}'
)
logger.debug(f'Dropping the following {len(drop_cols)} columns:\n {drop_cols}')
df.drop(drop_cols, axis=1, inplace=True)
return df
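# Synthetic example: 'mostly_null' exceeds the default 20% null ratio and is
# dropped; 'kept' survives. A copy is passed because the function mutates df.
def _example_df_drop_nulls():
    df = pd.DataFrame({
        'kept': [1, 2, 3, 4, 5],
        'mostly_null': [None, None, None, 1.0, 2.0],
    })
    return df_drop_nulls(df.copy())  # returns a frame with only 'kept'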
def df_drop_std(df, min_std_dev=1.5e-2, protected_cols: List[str] = None):
"""Drop low variance cols.
Warnings
--------
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
if protected_cols is None:
protected_cols = list()
std_values = df.std()
low_variance_cols = std_values < min_std_dev
low_variance_cols = low_variance_cols.index[low_variance_cols].tolist()
low_variance_cols = [c for c in low_variance_cols if c not in protected_cols]
logger.debug(
f'Dropping the following {len(low_variance_cols)} columns '
f'due to low variance:\n {low_variance_cols}'
)
df.drop(low_variance_cols, axis=1, inplace=True)
return df
def df_drop_corr(
df,
target_col,
max_corr=0.3,
protected_cols: List[str] = None,
frac=0.2,
random_state=None,
):
"""Drop high correlated to-target cols.
Warnings
--------
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
if target_col not in df.columns:
raise ValueError(f"target col ({target_col}) is not in dataframe columns")
if protected_cols is None:
protected_cols = list()
corr_df = df.sample(frac=frac, random_state=random_state).corr()
high_corr_cols = abs(corr_df[target_col]) > max_corr
high_corr_cols = high_corr_cols.index[high_corr_cols].tolist()
high_corr_cols = [c for c in high_corr_cols if c not in protected_cols]
logger.debug(
f'Dropping the following {len(high_corr_cols)} columns due to high correlation '
f'with target:\n {high_corr_cols}'
)
df.drop(high_corr_cols, axis=1, inplace=True)
return df
def df_get_typed_cols(df, col_type='cat', protected_cols: List[str] = None):
"""Get typed columns, excluding protected cols if passed."""
assert col_type in ('cat', 'num', 'date', 'bool', 'timedelta')
if protected_cols is None:
protected_cols = list()
if col_type == 'cat': # Work in cases, else dont define include var
include = ['object', 'category']
elif col_type == 'num':
include = ['number']
elif col_type == 'date':
include = ['datetime']
elif col_type in ('bool', 'timedelta'):
include = [col_type]
typed_cols = [
c for c in df.select_dtypes(include=include).columns if c not in protected_cols
]
return typed_cols
def df_encode_categorical_dummies(
df,
cat_cols: List[str] = None,
skip_cols: List[str] = None,
top=25,
other_val='OTHER',
):
"""Encode categorical columns into dummies.
Warnings
--------
- Be aware this function potentially modifies the given DataFrame df, so please send a copy of the original if you want it to remain unmodified.
"""
if skip_cols is None:
skip_cols = list()
if cat_cols is None:
cat_cols = list()
pre_dummy_cols = df.columns.tolist()
cat_cols = df_get_typed_cols(df, col_type='cat') if cat_cols == [] else cat_cols
cat_cols = [c for c in cat_cols if c not in skip_cols]
for c in cat_cols:
top_categories = df[c].value_counts().index.values[0:top]
df[c] = df[c].where(df[c].isin(top_categories), other=other_val)
logger.debug(f'Getting dummies from these top categories:{cat_cols}...')
df = pd.get_dummies(df, columns=cat_cols, drop_first=False)
""" test indexing with ix """
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.types.common import is_scalar
from pandas.compat import lrange
from pandas import Series, DataFrame, option_context, MultiIndex
from pandas.util import testing as tm
from pandas.core.common import PerformanceWarning
class TestIX(tm.TestCase):
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0, index=[4, 5, 6])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import decimal
import codecs
import csv
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import permutation_test_score
from sklearn.metrics import roc_auc_score, auc
def integrated_clf_model(feat_sel, model, train_data, test_data, cv):
starttime = time.time()
feature_list = train_data.list_features
if feat_sel == None:
pipe = Pipeline(steps=[
(model.name, model.model)
])
pipe_param_grid = model.param_grid
else:
pipe = Pipeline(steps=[
(feat_sel.name, feat_sel.model),
(model.name, model.model)
])
pipe_param_grid = dict(feat_sel.param_grid, **model.param_grid)
search = GridSearchCV(pipe, pipe_param_grid, iid=False, cv=cv, return_train_score=False, scoring='accuracy')
search.fit(train_data.X, train_data.y)
optimal_score = search.best_score_
optimal_params = search.best_params_
optimal_model = search.best_estimator_
_, _, pvalue_tested = permutation_test_score(
optimal_model,
train_data.X,
train_data.y,
scoring='accuracy',
cv=cv,
n_permutations=100,
n_jobs=1,
random_state=0
)
print('The best score is', optimal_score)
print('The corresponding parameter setting is', optimal_params)
# ========================================
# Evaluation and Visualization
# ========================================
# Optimization Curve and Selected Features (if possible)
if feat_sel and feat_sel.name == 'pca':
# plt.figure()
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(lambda g: g.nlargest(1, 'mean_test_score'))
# best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score')
# plt.ylabel('Classification accuracy (val)')
# plt.xlabel('n_components')
# plt.title('Optimization Curve')
# plt.savefig(result_path + '/' + 'optimization_curve.png', dpi=300)
elif feat_sel and feat_sel.name == 'anova':
# plt.figure()
results = pd.DataFrame(search.cv_results_)
components_col = 'param_anova__percentile'
best_clfs = results.groupby(components_col).apply(lambda g: g.nlargest(1, 'mean_test_score'))
# best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score')
# plt.ylabel('Classification accuracy (val)')
# plt.xlabel('percentile')
# plt.title('Optimization Curve')
# plt.savefig(result_path + '/' + 'optimization_curve.png', dpi=300)
selector = optimal_model.named_steps['anova'].get_support()
selected_feature_list = np.array(feature_list)[selector]
if model.name == 'svm':
selected_weight_list = optimal_model.named_steps['svm'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'rf':
selected_weight_list = optimal_model.named_steps['rf'].feature_importances_
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'lr':
selected_weight_list = optimal_model.named_steps['lr'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'lda':
selected_weight_list = optimal_model.named_steps['lda'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif feat_sel and feat_sel.name == 'rfe':
# plt.figure()
results = pd.DataFrame(search.cv_results_)
components_col = 'param_rfe__n_features_to_select'
best_clfs = results.groupby(components_col).apply(lambda g: g.nlargest(1, 'mean_test_score'))
# best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score')
# plt.ylabel('Classification accuracy (val)')
# plt.xlabel('n_features_to_select')
# plt.title('Optimization Curve')
# plt.savefig(result_path + '/' + 'optimization_curve.png', dpi=300)
selector = optimal_model.named_steps['rfe'].get_support()
selected_feature_list = np.array(feature_list)[selector]
if model.name == 'svm':
selected_weight_list = optimal_model.named_steps['svm'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'rf':
selected_weight_list = optimal_model.named_steps['rf'].feature_importances_
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'lr':
selected_weight_list = optimal_model.named_steps['lr'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'lda':
selected_weight_list = optimal_model.named_steps['lda'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif not feat_sel:
if model.name == 'svm':
selected_weight_list = optimal_model.named_steps['svm'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': feature_list, 'Weight': selected_weight_list})
elif model.name == 'rf':
selected_weight_list = optimal_model.named_steps['rf'].feature_importances_
feature_weights_list = pd.DataFrame({'Feature': feature_list, 'Weight': selected_weight_list})
elif model.name == 'lr':
selected_weight_list = optimal_model.named_steps['lr'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': feature_list, 'Weight': selected_weight_list})
elif model.name == 'lda':
selected_weight_list = optimal_model.named_steps['lda'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': feature_list, 'Weight': selected_weight_list})
# ROC Curve and Confusion Matrix
from scipy import interp
from sklearn.metrics import roc_curve, auc
optimal_model.probability = True
predictions = optimal_model.predict(test_data.X)
probas_ = optimal_model.predict_proba(test_data.X)
predictions_list = pd.DataFrame({
'Original': test_data.y,
'Predicted': predictions,
'Proba: Group 0': probas_[:, 0],
'Proba: Group 1': probas_[:, 1]
})
from sklearn.metrics import confusion_matrix
tn, fp, fn, tp = confusion_matrix(test_data.y, predictions).ravel()
cnf_accuracy = (tn + tp) / (tn + fp + fn + tp)
test_accuracy = cnf_accuracy
cnf_sensitivity = tp / (tp + fn)
test_sensitivity = cnf_sensitivity
cnf_specificity = tn / (tn + fp)
test_specificity = cnf_specificity
# plt.figure()
mean_fpr = np.linspace(0, 1, 100)
fpr, tpr, _ = roc_curve(test_data.y, probas_[:, 1])
roc_auc = auc(fpr, tpr)
# plt.plot(fpr, tpr, lw=1, alpha=0.3, color='b',
# label='AUC = %0.2f' % (roc_auc))
# plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
# label='Chance', alpha=.8)
# plt.xlim([-0.05, 1.05])
# plt.ylim([-0.05, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('Receiver Operating Characteristic')
# plt.legend(loc="lower right")
# plt.savefig(result_path + '/' + 'ROC_curve.png', dpi=300)
endtime = time.time()
runtime = str(endtime - starttime)
runtime = str(decimal.Decimal(runtime).quantize(decimal.Decimal('0.00'))) + 's'
print(runtime)
result_dict = {}
result_dict['Optimal CV Accuracy'] = optimal_score
result_dict['Optimal Parameters'] = optimal_params
result_dict['Permutation Test p-Value'] = pvalue_tested
result_dict['Test Accuracy'] = test_accuracy
result_dict['Test Sensitivity'] = test_sensitivity
result_dict['Test Specificity'] = test_specificity
result_dict['Area Under Curve'] = roc_auc
result_dict['Run Time'] = runtime
result_dict['ROC fpr'] = list(fpr)
result_dict['ROC tpr'] = list(tpr)
result_dict['Predictions'] = predictions_list.to_dict('records')
try:
result_dict['Feature Weights'] = feature_weights_list.to_dict('records')
except:
result_dict['Feature Weights'] = pd.DataFrame({"Error": ["This model doesn\'t support generating feature weights"]}).to_dict('records')
if feat_sel:
result_dict['Optimization'] = best_clfs.to_dict('records')
return result_dict
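# --- Hedged usage sketch (illustrative only) ---
# The namedtuple wrappers below are stand-ins invented for this example; the
# real project presumably supplies its own feature-selection / model / dataset
# objects exposing .name, .model, .param_grid and .X, .y, .list_features.
# Param-grid keys must be prefixed with the pipeline step name ('anova__',
# 'svm__') exactly as GridSearchCV expects.
def _example_integrated_clf_model():
    from collections import namedtuple
    from sklearn.datasets import make_classification
    from sklearn.feature_selection import SelectPercentile, f_classif
    from sklearn.svm import SVC

    ModelSpec = namedtuple('ModelSpec', ['name', 'model', 'param_grid'])
    DataSpec = namedtuple('DataSpec', ['X', 'y', 'list_features'])

    X, y = make_classification(n_samples=60, n_features=10, random_state=0)
    features = ['feat_%d' % i for i in range(X.shape[1])]
    train_data = DataSpec(X[:40], y[:40], features)
    test_data = DataSpec(X[40:], y[40:], features)

    feat_sel = ModelSpec('anova', SelectPercentile(f_classif),
                         {'anova__percentile': [50, 100]})
    model = ModelSpec('svm', SVC(kernel='linear', probability=True),
                      {'svm__C': [0.1, 1.0]})
    return integrated_clf_model(feat_sel, model, train_data, test_data, cv=3)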
def integrated_clf_model_notest(feat_sel, model, train_data, cv):
starttime = time.time()
feature_list = train_data.list_features
if feat_sel == None:
pipe = Pipeline(steps=[
(model.name, model.model)
])
pipe_param_grid = model.param_grid
else:
pipe = Pipeline(steps=[
(feat_sel.name, feat_sel.model),
(model.name, model.model)
])
pipe_param_grid = dict(feat_sel.param_grid, **model.param_grid)
search = GridSearchCV(pipe, pipe_param_grid, iid=False, cv=cv, return_train_score=False, scoring='accuracy')
search.fit(train_data.X, train_data.y)
optimal_score = search.best_score_
optimal_params = search.best_params_
optimal_model = search.best_estimator_
_, _, pvalue_tested = permutation_test_score(
optimal_model,
train_data.X,
train_data.y,
scoring='accuracy',
cv=cv,
n_permutations=100,
n_jobs=1,
random_state=0
)
print('The best score is', optimal_score)
print('The corresponding parameter setting is', optimal_params)
# ========================================
# Evaluation and Visualization
# ========================================
# Optimization Curve and Selected Features (if possible)
if feat_sel and feat_sel.name == 'pca':
# plt.figure()
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(lambda g: g.nlargest(1, 'mean_test_score'))
# best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score')
# plt.ylabel('Classification accuracy (val)')
# plt.xlabel('n_components')
# plt.title('Optimization Curve')
# plt.savefig(result_path + '/' + 'optimization_curve.png', dpi=300)
elif feat_sel and feat_sel.name == 'anova':
# plt.figure()
results = pd.DataFrame(search.cv_results_)
components_col = 'param_anova__percentile'
best_clfs = results.groupby(components_col).apply(lambda g: g.nlargest(1, 'mean_test_score'))
# best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score')
# plt.ylabel('Classification accuracy (val)')
# plt.xlabel('percentile')
# plt.title('Optimization Curve')
# plt.savefig(result_path + '/' + 'optimization_curve.png', dpi=300)
selector = optimal_model.named_steps['anova'].get_support()
selected_feature_list = np.array(feature_list)[selector]
if model.name == 'svm':
selected_weight_list = optimal_model.named_steps['svm'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'rf':
selected_weight_list = optimal_model.named_steps['rf'].feature_importances_
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'lr':
selected_weight_list = optimal_model.named_steps['lr'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'lda':
selected_weight_list = optimal_model.named_steps['lda'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif feat_sel and feat_sel.name == 'rfe':
# plt.figure()
results = pd.DataFrame(search.cv_results_)
components_col = 'param_rfe__n_features_to_select'
best_clfs = results.groupby(components_col).apply(lambda g: g.nlargest(1, 'mean_test_score'))
# best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score')
# plt.ylabel('Classification accuracy (val)')
# plt.xlabel('n_features_to_select')
# plt.title('Optimization Curve')
# plt.savefig(result_path + '/' + 'optimization_curve.png', dpi=300)
selector = optimal_model.named_steps['rfe'].get_support()
selected_feature_list = np.array(feature_list)[selector]
if model.name == 'svm':
selected_weight_list = optimal_model.named_steps['svm'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'rf':
selected_weight_list = optimal_model.named_steps['rf'].feature_importances_
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'lr':
selected_weight_list = optimal_model.named_steps['lr'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif model.name == 'lda':
selected_weight_list = optimal_model.named_steps['lda'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': selected_feature_list, 'Weight': selected_weight_list})
elif not feat_sel:
if model.name == 'svm':
selected_weight_list = optimal_model.named_steps['svm'].coef_[0]
feature_weights_list = pd.DataFrame({'Feature': feature_list, 'Weight': selected_weight_list})
# -*- coding: utf-8 -*-
import csv
import glob
import math as m
import os
from datetime import timedelta
from getpass import getpass
from math import ceil
import numpy as np
import pandas as pd
import robin_stocks as r
from portfolios.portfolio.portfolio import Portfolio
from portfolios.utils.helpers import last_trading_day
def parse_portfolio(df=None, p=None):
"""
Takes a dataframe with transactions and performs those
on a given portfolio
Parameters
==========
df : input dataframe
p : portfolio (Portfolio class object)
Returns
=======
p : modified portfolio (Portfolio class object)
"""
# use a lower bound on the minimum number of days pulled
minimum_date_for_data = last_trading_day() - timedelta(weeks=1)
# put input dataframe(s) in list
dfs = []
if type(df) == pd.core.frame.DataFrame:
dfs.append(df)
else:
dfs.extend(df)
# loop through list of dataframes
Tickers_all = []
for df in dfs:
Tickers_all = Tickers_all + list(df.Ticker.values)
# load data for all securities
for ticker in list(set(Tickers_all)):
if ticker:
if ticker not in p.securities_archive:
if str(ticker).isalnum() & (str(ticker) != "nan"):
print("Adding ", ticker)
min_date = minimum_date_for_data
for df in dfs:
if ticker in list(set(df.Ticker)):
first_date = min(df.loc[df.Ticker == ticker, "Date"].values)
min_date = min(first_date, min_date)
p.add_security_archive(ticker, min_date)
for df in dfs:
# define a priority for transaction types so ordering makes sense
df.loc[df.Transaction == "deposit", "Priority"] = 1
df.loc[df.Transaction == "Contribution", "Priority"] = 1
df.loc[df.Transaction == "Funds Received", "Priority"] = 1
df.loc[df.Transaction == "Conversion (incoming)", "Priority"] = 1
df.loc[df.Transaction == "buy", "Priority"] = 2
df.loc[df.Transaction == "Buy", "Priority"] = 2
df.loc[df.Transaction == "Reinvestment", "Priority"] = 2
df.loc[df.Transaction == "dividend", "Priority"] = 3
df.loc[df.Transaction == "Dividend", "Priority"] = 3
df.loc[df.Transaction == "sell", "Priority"] = 4
df.loc[df.Transaction == "Sell", "Priority"] = 4
df.loc[df.Transaction == "withdraw", "Priority"] = 5
df.loc[df.Transaction == "Distribution", "Priority"] = 5
df.sort_values(by=["Date", "Priority"], inplace=True)
for index, row in df.iterrows():
if row.notnull()["Date"]:
# print(row['Date'], row['Transaction'], row['Ticker'], row['Currency'], row['Price'], row['Quantity'])
if str.lower(row["Transaction"]) == "buy":
p.buy_security(
date=row["Date"],
ticker=row["Ticker"],
currency=row["Currency"],
price=row["Price"],
quantity=row["Quantity"],
)
elif str.lower(row["Transaction"]) == "sell":
p.sell_security(
date=row["Date"],
ticker=row["Ticker"],
currency=row["Currency"],
price=row["Price"],
quantity=row["Quantity"],
)
# FINRA fee of $.000119 per share up to $5.95
#_FINRAfee = min(
# max(ceil(0.0119 * row["Quantity"]), 1.0) / 100.0, 5.95
#)
# SEC fee of $.000013 per trade of up to $1M
#if not np.isnan(row["Price"]):
# _SECfee = max(ceil(row["Quantity"] * row["Price"] / 800.0), 1.0) / 100.0
#else:
# _SECfee = 0.000013 # need to find a better place to put this fee where price is known
#p.wallet = p.wallet.append(
# {"Date": row["Date"], "Change": -_FINRAfee - _SECfee},
# ignore_index=True,
#)
elif str.lower(row["Transaction"]) == "deposit":
p.deposit_cash(
date=row["Date"],
currency=row["Currency"],
price=row["Price"],
quantity=row["Quantity"],
)
elif str.lower(row["Transaction"]) == "withdraw":
p.withdraw_cash(
date=row["Date"],
currency=row["Currency"],
price=row["Price"],
quantity=row["Quantity"],
)
elif row["Transaction"] == "Dividend":
p.dividend(
date=row["Date"],
ticker=row["Ticker"],
currency=row["Currency"],
price=1.0,
quantity=row["Dollars"],
)
elif row["Transaction"] == "dividend":
p.dividend(
date=row["Date"],
ticker=row["Ticker"],
currency=row["Currency"],
price=1.0,
quantity=row["Quantity"],
)
else:
pass
else:
pass
return p
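# Hedged sketch of the transaction-frame layout this parser consumes; the
# ticker, dates, and amounts are invented, and Portfolio comes from this
# package. Adding a security pulls price history, so this hits the network.
def _example_parse_portfolio():
    df = pd.DataFrame({
        'Date': pd.to_datetime(['2020-01-02', '2020-01-03']),
        'Transaction': ['deposit', 'buy'],
        'Ticker': [None, 'VTI'],
        'Currency': ['USD', 'USD'],
        'Price': [1.0, 160.0],
        'Quantity': [1000.0, 5.0],
    })
    p = Portfolio(name='Example')
    return parse_portfolio(df, p)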
def parse_portfolio_vanguard(df=None, p=None):
"""
Takes a dataframe with transactions in Vanguard format
and performs those on a given portfolio
Parameters
==========
df : input dataframe
p : portfolio (Portfolio class object)
Returns
=======
p : modified portfolio (Portfolio class object)
"""
# use a lower bound on the minimum number of days pulled
minimum_date_for_data = last_trading_day() - timedelta(weeks=1)
# put input dataframe(s) in list
dfs = []
if type(df) == pd.core.frame.DataFrame:
dfs.append(df)
else:
dfs.extend(df)
# loop through list of dataframes
Tickers_all = []
for df in dfs:
Tickers_all = Tickers_all + list(df.Ticker.values)
# load data for all securities
for ticker in list(set(Tickers_all)):
if ticker:
if ticker not in p.securities_archive:
if str(ticker).isalnum() & (str(ticker) != "nan"):
print("Adding ", ticker)
min_date = minimum_date_for_data
for df in dfs:
if ticker in list(set(df.Ticker)):
first_date = min(df.loc[df.Ticker == ticker, "Date"].values)
min_date = min(first_date, min_date)
p.add_security_archive(ticker, min_date)
for df in dfs:
# define a priority for transaction types so ordering makes sense
df.loc[df.Transaction == "deposit", "Priority"] = 1
df.loc[df.Transaction == "Contribution", "Priority"] = 1
df.loc[df.Transaction == "Funds Received", "Priority"] = 1
df.loc[df.Transaction == "Conversion (incoming)", "Priority"] = 1
df.loc[df.Transaction == "buy", "Priority"] = 2
df.loc[df.Transaction == "Buy", "Priority"] = 2
df.loc[df.Transaction == "Reinvestment", "Priority"] = 2
df.loc[df.Transaction == "Reinvestment (LT)", "Priority"] = 2
df.loc[df.Transaction == "Reinvestment (ST)", "Priority"] = 2
df.loc[df.Transaction == "dividend", "Priority"] = 3
df.loc[df.Transaction == "Dividend", "Priority"] = 3
df.loc[df.Transaction == "Capital gain (LT)", "Priority"] = 3
df.loc[df.Transaction == "Capital gain (ST)", "Priority"] = 3
df.loc[df.Transaction == "sell", "Priority"] = 4
df.loc[df.Transaction == "Sell", "Priority"] = 4
df.loc[df.Transaction == "Withdrawal", "Priority"] = 5
df.loc[df.Transaction == "withdraw", "Priority"] = 5
df.loc[df.Transaction == "Distribution", "Priority"] = 5
df.sort_values(by=["Date", "Priority"], inplace=True)
for index, row in df.iterrows():
if row.notnull()["Date"]:
# print(row['Date'], row['Transaction'], row['Ticker'], row['Currency'], row['Price'], row['Quantity'], row['Dollars'])
if (
row["Transaction"] == "Buy"
or row["Transaction"] == "buy"
):
p.buy_security(
date=row["Date"],
ticker=row["Ticker"],
currency=row["Currency"],
price=row["Price"],
quantity=row["Quantity"],
)
elif (
row["Transaction"] == "Sell"
or row["Transaction"] == "sell"
):
p.sell_security(
date=row["Date"],
ticker=row["Ticker"],
currency=row["Currency"],
price=row["Price"],
quantity=-row["Quantity"],
) # note the minus sign
elif (
row["Transaction"] == "Contribution"
or row["Transaction"] == "Funds Received"
or row["Transaction"] == "Conversion (incoming)"
or row["Transaction"] == "deposit"
):
p.deposit_cash(
date=row["Date"],
currency=row["Currency"],
price=1.0,
quantity=row["Dollars"],
)
elif (
row["Transaction"] == "Distribution"
or row["Transaction"] == "Withdrawal"
or row["Transaction"] == "withdraw"
):
p.withdraw_cash(
date=row["Date"],
currency=row["Currency"],
price=1.0,
quantity=-1.0*row["Dollars"],
)
elif (
row["Transaction"] == "Dividend"
or row["Transaction"] == "Capital gain (LT)"
or row["Transaction"] == "Capital gain (ST)"
):
p.dividend(
date=row["Date"],
ticker=row["Ticker"],
currency=row["Currency"],
price=1.0,
quantity=row["Dollars"],
)
elif (
row["Transaction"] == "Reinvestment"
or row["Transaction"] == "Reinvestment (LT)"
or row["Transaction"] == "Reinvestment (ST)"
) and row["Quantity"] != 0:
p.buy_security(
date=row["Date"],
ticker=row["Ticker"],
currency=row["Currency"],
price=row["Price"],
quantity=row["Quantity"],
)
else:
pass
else:
pass
return p
def import_portfolio(path="", name="RobinHood"):
"""
Reads in CSV file with portfolio transactions
Parameters
==========
path : path and name of CSV file
name : desired portfolio name
Returns
=======
p : portfolio (Portfolio class object)
"""
# get all files that match path
all_files = glob.glob(path)
all_dfs = []
for file in all_files:
print("Reading in {}".format(file))
# read in transaction list
df = pd.read_csv(file, parse_dates=[0])
# collect all dfs before concatenating them
all_dfs.append(df)
# final portfolio df to parse
df = pd.concat(all_dfs, axis=0, ignore_index=True)
# create a new portfolio object
p = Portfolio(name=name)
# parse
parse_portfolio(df, p)
return p
def import_portfolio_vanguard(path="", name="Vanguard"):
"""
Reads in CSV file with portfolio transactions in Vanguard format
Parameters
==========
path : path and name of CSV file
name : desired portfolio name
Returns
=======
p : portfolio (Portfolio class object)
"""
# get all files that match path
all_files = glob.glob(path)
all_dfs = []
for file in all_files:
print("Reading in {}".format(file))
# read in csv to find beginning of transaction list
input_file = csv.reader(open(file, "r"), delimiter=",")
for i, row in enumerate(input_file):
if len(row) > 0 and "Trade" in row[1]:
break
# read in transaction list
df = pd.read_csv(
file,
skiprows=i,
usecols=[
"Trade Date",
"Transaction Type",
"Symbol",
"Shares",
"Share Price",
"Principal Amount",
],
parse_dates=["Trade Date"],
skip_blank_lines=False,
)
# rename columns to match standard format
df.columns = ["Date", "Transaction", "Ticker", "Quantity", "Price", "Dollars"]
# add a currency column to match standard format
df["Currency"] = "USD"
# collect all dfs before concatenating them
all_dfs.append(df)
# final portfolio df to parse
df = pd.concat(all_dfs, axis=0, ignore_index=True)
# create a new portfolio object
p = Portfolio(name=name)
# parse
p = parse_portfolio_vanguard(df, p)
return p
def import_portfolio_robinhood(
access_token, username=None, password=None, name="Robinhood", free_stock=False, merger=False
):
"""
Accesses Robinhood account and downloads transactions
Parameters
==========
username : Robinhood username
password : <PASSWORD>
name : desired portfolio name
free_stock : include a free stock not captured by transaction history (see below)
Returns
=======
df : dataframe with transactions
p : portfolio (Portfolio class object)
"""
if not access_token:
if username is None:
username = getpass("Username: ")
if password is None:
password = getpass("Password: ")
# use Robinhood api to access account and cancel all standing orders
r.login(username, password)
# build dataframe
Date = []
Transaction = []
Ticker = []
Currency = []
Price = []
Quantity = []
# parse order history
orders = r.get_all_stock_orders()
print("Parsing orders ...")
# pre-pull all tickers before creating order history df
Tickerset = []
for order in orders:
if len(order["executions"]):
Tickerset.append(order["instrument"])
Tickerset = list(set(Tickerset))
# make a lookup dict for ticker symbols
Tickersymbols = {}
for element in Tickerset:
Tickersymbols[element] = r.get_instrument_by_url(element)["symbol"]
for order in orders:
if len(order["executions"]):
Date.append(pd.to_datetime(order["last_transaction_at"]))
Transaction.append(order["side"])
Ticker.append(Tickersymbols[order["instrument"]])
Currency.append("USD")
Price.append(order["average_price"])
Quantity.append(order["quantity"])
# add deposits
transfers = r.get_bank_transfers()
print("Parsing bank transfers ...")
for transfer in transfers:
if transfer["cancel"] is None:
Date.append(pd.to_datetime(transfer["created_at"]))
Transaction.append(transfer["direction"])
Ticker.append(None)
Currency.append("USD")
Price.append(1.0)
Quantity.append(transfer["amount"])
# add dividends
dividends = r.get_dividends()
print("Parsing dividends ...")
for dividend in dividends:
if dividend["state"] == "paid":
Date.append(pd.to_datetime(dividend["paid_at"]))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 11:26:20 2018
@author: nbaya
"""
import os
import glob
import re
import pandas as pd
from subprocess import call
from joblib import Parallel, delayed
import multiprocessing
import sys
import numpy as np
v3_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/imputed-v3-results/"
#Get saved phenotypes
malefiles = (list(map(os.path.basename,glob.glob(v3_path+"*.male*.gz")))) #restrict to male files to prevent counting phenotype twice
find = re.compile(r"^(.*?)\..*") #regex search term for grabbing all the text before the first period in a string
savedphenotypes = list(map(lambda filename: re.search(find,filename).group(1), malefiles)) #list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
#Get all phenotypes
allphenotypes = pd.Series.tolist(pd.read_table(v3_path+"phenotypes.both_sexes.tsv").iloc[:]["phenotype"]) #list of all phenotypes (male & female)
allphenotypes = pd.DataFrame({'phenotype':allphenotypes})
allphenotypes.to_csv(v3_path+"allphenotypeslist.tsv",sep = "\t")
# TEMPORARY -------------------------------------------------------------------
#savedFiles= (list(map(os.path.basename,glob.glob(chrX_path+"*.gz")))) #restrict to male files to prevent counting phenotype twice
#find = re.compile(r"^(.*?)\..*") #regex search term for grabbing all the text before the first period in a string
#newphenotypes = list(map(lambda filename: re.search(find,filename).group(1), savedFiles)) #list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
#
#nextphenotypes = list(set(savedphenotypes).difference(set(newphenotypes)))
#
#len(nextphenotypes)
# -----------------------------------------------------------------------------
n_cores = multiprocessing.cpu_count()
#old method of extracting chrX
def prev_chrX_from_saved_phenotypes(ph):
tb_male = pd.read_csv((v3_path+ph+".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t') #read files
tb_female = pd.read_csv((v3_path+ph+".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t')
chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:] #get chrX variants for males
chrX_female = tb_female[tb_female.iloc[:]["variant"].str.match('X')][:] #get chrX variants for females
chrX = pd.merge(chrX_male,chrX_female, on = 'variant',suffixes = ("_male","_female"))
chrX.to_csv(chrX_path+ph+".chrX.tsv.gz",sep = '\t', compression = 'gzip')
#Parallel(n_jobs=n_cores,verbose = 50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in savedphenotypes)
# TEMPORARY -------------------------------------------------------------------
#Parallel(n_jobs=n_cores,verbose = 50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in nextphenotypes)
# -----------------------------------------------------------------------------
#def chrX_from_new_phenotypes(ph):
#
## call(["gsutil" ,"cp","gs://ukbb-gwas-imputed-v3-results/export1/"+ph+".**male*",
## "~/Documents/lab/ukbb-sexdiff/chrX/"])
#
#
# call('gsutil ls gs://ukbb-gwas-imputed-v3-results/export1/'+ph+'.**male*', shell=True)
## "~/Documents/lab/ukbb-sexdiff/chrX/',)
## call(["paste","<(cat", ph, ".imputed_v3.results.female.tsv.gz","|","zcat",
## "|" , "cut -f 1,2,3,5,6,8)", "<(cat", ph,".imputed_v3.results.male.tsv.gz" ,
## "|", "zcat", "|", "cut", "-f", "1,2,3,5,6,8)", "|", "awk" ,"\'", "NR==1{",
## "print", "\"variant\",\"n_female\",\"n_male\",\"frq_female\",\"frq_male\",\"beta_female\",\"se_female\",\"p_female\",\"beta_male\",\"se_male\",\"p_male\"",
## "}NR>1", "&&", "$1==$7{", "maff=$3/(2*$2);" , "mafm=$9/(2*$8);" ,
## "if(maff > .05 && maff<.95 && mafm > .05 && mafm < .95){",
## "print $1,$2,$8,maff,mafm,$4,$5,$6,$10,$11,$12} }\' | gzip >", ph, ".sexdiff.gz]"])
#
#testph = ['46','47']
#
#for ph in testph:
# chrX_from_new_phenotypes(ph)
#for ph in set(allphenotypes).difference(set(savedphenotypes)): #for all phenotypes not saved
# -----------------------------------------------------------------------------
chrX_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/chrX/data/"
ph = "1757"
#Males
tb_male = pd.read_csv((v3_path+ph+".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t') #read files
chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:] #get chrX variants for males
chrX_male = chrX_male.reset_index() #necessary for upcoming concat between chrX_male and a3
a1 = np.asarray(chrX_male.iloc[:,0])
a2 = list(map(lambda variant: str(variant).split(':'), a1))
a3 = pd.DataFrame(np.asarray(a2).reshape((len(a2),4)))
chrX_male2 = pd.concat([a3[[0,1,3,2]],chrX_male], axis = 1).drop(['index','tstat','AC','ytx'], axis =1)
chrX_male2 = chrX_male2.rename(index=str, columns={0: "CHR", 1: "POS", 3: "EFFECT_ALLELE", 2: "NON_EFFECT_ALLELE",
"variant": "SNP", "nCompleteSamples": "N", "beta": "BETA",
"se": "SE", "pval": "P_VAL"})
chrX_male2.to_csv(chrX_path+ph+".chrX.male.tsv.gz",sep = '\t', compression = 'gzip')
#Females
tb_female = pd.read_csv((v3_path+ph+".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t')
import os
import sys
import pytest
from shapely.geometry import Polygon, GeometryCollection
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from tests.fixtures import *
from tests.test_core_components_route import self_looping_route, route
from tests.test_core_components_service import service
from genet.inputs_handler import matsim_reader, gtfs_reader
from genet.inputs_handler import read
from genet.schedule_elements import Schedule, Service, Route, Stop, read_vehicle_types
from genet.utils import plot, spatial
from genet.validate import schedule_validation
from genet.exceptions import ServiceIndexError, RouteIndexError, StopIndexError, UndefinedCoordinateSystemError, \
ConflictingStopData, InconsistentVehicleModeError
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
pt2matsim_vehicles_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "vehicles.xml"))
@pytest.fixture()
def schedule():
route_1 = Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='2',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
@pytest.fixture()
def strongly_connected_schedule():
route_1 = Route(route_short_name='name',
mode='bus',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='3', x=3, y=3, epsg='epsg:27700', name='Stop_3'),
Stop(id='4', x=7, y=5, epsg='epsg:27700', name='Stop_4'),
Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['1', '2'], departure_offsets=['1', '2'],
id='1')
route_2 = Route(route_short_name='name_2',
mode='bus',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='7', x=3, y=3, epsg='epsg:27700', name='Stop_7'),
Stop(id='8', x=7, y=5, epsg='epsg:27700', name='Stop_8'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['1', '2', '3', '4', '5'],
departure_offsets=['1', '2', '3', '4', '5'],
id='2')
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
def test_initiating_schedule(schedule):
s = schedule
assert_semantically_equal(dict(s._graph.nodes(data=True)), {
'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'6': {'services': {'service'}, 'routes': {'2'}, 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()}})
assert_semantically_equal(s._graph.edges(data=True)._adjdict,
{'5': {'6': {'services': {'service'}, 'routes': {'2'}}},
'6': {'7': {'services': {'service'}, 'routes': {'2'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}}, '8': {}, '4': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}})
log = s._graph.graph.pop('change_log')
assert log.empty
assert_semantically_equal(s._graph.graph,
{'name': 'Schedule graph',
'routes': {'2': {'route_short_name': 'name_2', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '2', 'route': [],
'await_departure': [],
'ordered_stops': ['5', '6', '7', '8']},
'1': {'route_short_name': 'name', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '1', 'route': [],
'await_departure': [],
'ordered_stops': ['1', '2', '3', '4']}},
'services': {'service': {'id': 'service', 'name': 'name'}},
'route_to_service_map': {'1': 'service', '2': 'service'},
'service_to_route_map': {'service': ['1', '2']},
'crs': {'init': 'epsg:27700'}})
def test_initiating_schedule_with_non_uniquely_indexed_objects():
route_1 = Route(route_short_name='name',
mode='bus', id='',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_2_bus', 'veh_3_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service1 = Service(id='service', routes=[route_1, route_2])
service2 = Service(id='service', routes=[route_1, route_2])
s = Schedule(epsg='epsg:27700', services=[service1, service2])
assert s.number_of_routes() == 4
assert len(s) == 2
def test__getitem__returns_a_service(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert schedule['service'] == services[0]
def test_accessing_route(schedule):
assert schedule.route('1') == Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'),
Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'),
Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['1', '2'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
def test__repr__shows_number_of_services(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
schedule = Schedule('epsg:27700')
s = schedule.__repr__()
assert 'instance at' in s
assert 'services' in s
Schedule.__len__.assert_called()
def test__str__shows_info():
schedule = Schedule('epsg:27700')
assert 'Number of services' in schedule.__str__()
assert 'Number of routes' in schedule.__str__()
def test__len__returns_the_number_of_services(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert len(schedule) == 1
def test_print_shows_info(mocker):
mocker.patch.object(Schedule, 'info')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.info.assert_called_once()
def test_info_shows_number_of_services_and_routes(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
mocker.patch.object(Schedule, 'number_of_routes')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.__len__.assert_called()
Schedule.number_of_routes.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker, schedule):
mocker.patch.object(plot, 'plot_graph')
schedule.plot()
plot.plot_graph.assert_called_once()
def test_reproject_changes_projection_for_all_stops_in_route():
correct_x_y = {'x': -0.14967658860132668, 'y': 51.52393050617373}
schedule = Schedule(
'epsg:27700',
[Service(id='10314', routes=[
Route(
route_short_name='12',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_1_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])])
schedule.reproject('epsg:4326')
_stops = list(schedule.stops())
stops = dict(zip([stop.id for stop in _stops], _stops))
assert_semantically_equal({'x': stops['26997928P'].x, 'y': stops['26997928P'].y}, correct_x_y)
assert_semantically_equal({'x': stops['26997928P.link:1'].x, 'y': stops['26997928P.link:1'].y}, correct_x_y)
def test_adding_merges_separable_schedules(route):
schedule = Schedule(epsg='epsg:4326', services=[Service(id='1', routes=[route])])
before_graph_nodes = schedule.reference_nodes()
before_graph_edges = schedule.reference_edges()
a = Stop(id='10', x=40, y=20, epsg='epsg:27700', linkRefId='1')
b = Stop(id='20', x=10, y=20, epsg='epsg:27700', linkRefId='2')
c = Stop(id='30', x=30, y=30, epsg='epsg:27700', linkRefId='3')
d = Stop(id='40', x=70, y=50, epsg='epsg:27700', linkRefId='4')
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[Service(id='2', routes=[
Route(
route_short_name='name',
mode='bus',
stops=[a, b, c, d],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['04:40:00', '05:40:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'],
route=['1', '2', '3', '4'], id='2')
])])
tba_graph_nodes = schedule_to_be_added.reference_nodes()
tba_graph_edges = schedule_to_be_added.reference_edges()
schedule.add(schedule_to_be_added)
assert '1' in list(schedule.service_ids())
assert '2' in list(schedule.service_ids())
assert '1' in list(schedule.route_ids())
assert '2' in list(schedule.route_ids())
assert schedule.epsg == 'epsg:4326'
assert schedule.epsg == schedule_to_be_added.epsg
assert set(schedule._graph.nodes()) == set(before_graph_nodes) | set(tba_graph_nodes)
assert set(schedule._graph.edges()) == set(before_graph_edges) | set(tba_graph_edges)
def test_adding_throws_error_when_schedules_not_separable(test_service):
schedule = Schedule(epsg='epsg:4326', services=[test_service])
assert 'service' in schedule
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[test_service])
with pytest.raises(NotImplementedError) as e:
schedule.add(schedule_to_be_added)
assert 'This method only supports adding non overlapping services' in str(e.value)
def test_adding_calls_on_reproject_when_schedules_dont_have_matching_epsg(test_service, different_test_service, mocker):
mocker.patch.object(Schedule, 'reproject')
schedule = Schedule(services=[test_service], epsg='epsg:27700')
assert schedule.has_service('service')
schedule_to_be_added = Schedule(services=[different_test_service], epsg='epsg:4326')
schedule.add(schedule_to_be_added)
schedule_to_be_added.reproject.assert_called_once_with('epsg:27700')
def test_service_ids_returns_keys_of_the_services_dict(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert set(schedule.service_ids()) == {'service'}
def test_routes_returns_service_ids_with_unique_routes(route, similar_non_exact_test_route):
services = [Service(id='1', routes=[route]), Service(id='2', routes=[similar_non_exact_test_route])]
schedule = Schedule(services=services, epsg='epsg:4326')
routes = list(schedule.routes())
assert route in routes
assert similar_non_exact_test_route in routes
assert len(routes) == 2
def test_number_of_routes_counts_routes(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4362')
assert schedule.number_of_routes() == 3
def test_service_attribute_data_under_key(schedule):
df = schedule.service_attribute_data(keys='name').sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}}
))
def test_service_attribute_data_under_keys(schedule):
df = schedule.service_attribute_data(keys=['name', 'id']).sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}, 'id': {'service': 'service'}}
))
def test_route_attribute_data_under_key(schedule):
df = schedule.route_attribute_data(keys='route_short_name').sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}}
))
def test_route_attribute_data_under_keys(schedule):
df = schedule.route_attribute_data(keys=['route_short_name', 'mode']).sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}, 'mode': {'1': 'bus', '2': 'bus'}}
))
def test_stop_attribute_data_under_key(schedule):
df = schedule.stop_attribute_data(keys='x').sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0}}))
def test_stop_attribute_data_under_keys(schedule):
df = schedule.stop_attribute_data(keys=['x', 'y']).sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0},
'y': {'1': 2.0, '2': 2.0, '3': 3.0, '4': 5.0, '5': 2.0, '6': 2.0, '7': 3.0, '8': 5.0}}))
def test_extracting_services_on_condition(schedule):
ids = schedule.extract_service_ids_on_attributes(conditions={'name': 'name'})
assert ids == ['service']
def test_extracting_routes_on_condition(schedule):
ids = schedule.extract_route_ids_on_attributes(conditions=[{'mode': 'bus'}, {'route_short_name': 'name_2'}],
how=all)
assert ids == ['2']
def test_extracting_stops_on_condition(schedule):
ids = schedule.extract_stop_ids_on_attributes(conditions=[{'x': (0, 4)}, {'y': (0, 2)}], how=all)
assert set(ids) == {'5', '6', '1', '2'}
def test_getting_services_on_modal_condition(schedule):
service_ids = schedule.services_on_modal_condition(modes='bus')
assert service_ids == ['service']
def test_getting_routes_on_modal_condition(schedule):
route_ids = schedule.routes_on_modal_condition(modes='bus')
assert set(route_ids) == {'1', '2'}
def test_getting_stops_on_modal_condition(schedule):
stop_ids = schedule.stops_on_modal_condition(modes='bus')
assert set(stop_ids) == {'5', '6', '7', '8', '3', '1', '4', '2'}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
def test_getting_stops_on_spatial_condition_with_geojson(schedule, mocker):
mocker.patch.object(spatial, 'read_geojson_to_shapely',
return_value=GeometryCollection(
[Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])]))
stops = schedule.stops_on_spatial_condition(test_geojson)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_shapely_polygon(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
stops = schedule.stops_on_spatial_condition(p)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_s2_hex_region(schedule):
s2_region = '4837,4839,483f5,4844,4849'
stops = schedule.stops_on_spatial_condition(s2_region)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_routes_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p)
assert set(routes) == {'1', '2'}
def test_getting_routes_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p, how='within')
assert set(routes) == {'1', '2'}
def test_getting_services_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p)
assert set(routes) == {'service'}
def test_getting_services_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p, how='within')
assert set(routes) == {'service'}
def test_applying_attributes_to_service(schedule):
assert schedule._graph.graph['services']['service']['name'] == 'name'
assert schedule['service'].name == 'name'
schedule.apply_attributes_to_services({'service': {'name': 'new_name'}})
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_attributes_changing_id_to_service_throws_error(schedule):
assert 'service' in schedule._graph.graph['services']
assert schedule._graph.graph['services']['service']['id'] == 'service'
assert schedule['service'].id == 'service'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_services({'service': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_route(schedule):
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'name'
assert schedule.route('1').route_short_name == 'name'
schedule.apply_attributes_to_routes({'1': {'route_short_name': 'new_name'}})
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'new_name'
assert schedule.route('1').route_short_name == 'new_name'
def test_applying_mode_attributes_to_route_results_in_correct_mode_methods(schedule):
assert schedule.route('1').mode == 'bus'
assert schedule.modes() == {'bus'}
assert schedule.mode_graph_map() == {
'bus': {('3', '4'), ('2', '3'), ('1', '2'), ('6', '7'), ('5', '6'), ('7', '8')}}
schedule.apply_attributes_to_routes({'1': {'mode': 'new_bus'}})
assert schedule.route('1').mode == 'new_bus'
assert schedule.modes() == {'bus', 'new_bus'}
assert schedule['service'].modes() == {'bus', 'new_bus'}
assert schedule.mode_graph_map() == {'bus': {('7', '8'), ('6', '7'), ('5', '6')},
'new_bus': {('3', '4'), ('1', '2'), ('2', '3')}}
assert schedule['service'].mode_graph_map() == {'bus': {('6', '7'), ('7', '8'), ('5', '6')},
'new_bus': {('3', '4'), ('2', '3'), ('1', '2')}}
def test_applying_attributes_changing_id_to_route_throws_error(schedule):
assert '1' in schedule._graph.graph['routes']
assert schedule._graph.graph['routes']['1']['id'] == '1'
assert schedule.route('1').id == '1'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'1': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_stop(schedule):
assert schedule._graph.nodes['5']['name'] == ''
assert schedule.stop('5').name == ''
schedule.apply_attributes_to_stops({'5': {'name': 'new_name'}})
assert schedule._graph.nodes['5']['name'] == 'new_name'
assert schedule.stop('5').name == 'new_name'
def test_applying_attributes_changing_id_to_stop_throws_error(schedule):
assert '5' in schedule._graph.nodes
assert schedule._graph.nodes['5']['id'] == '5'
assert schedule.stop('5').id == '5'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'5': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def change_name(attrib):
return 'new_name'
def test_applying_function_to_services(schedule):
schedule.apply_function_to_services(function=change_name, location='name')
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_function_to_routes(schedule):
schedule.apply_function_to_routes(function=change_name, location='route_short_name')
for route in schedule.routes():
assert schedule._graph.graph['routes'][route.id]['route_short_name'] == 'new_name'
assert route.route_short_name == 'new_name'
def test_applying_function_to_stops(schedule):
schedule.apply_function_to_stops(function=change_name, location='name')
for stop in schedule.stops():
assert stop.name == 'new_name'
assert schedule._graph.nodes[stop.id]['name'] == 'new_name'
def test_adding_service(schedule, service):
service.reindex('different_service')
service.route('1').reindex('different_service_1')
service.route('2').reindex('different_service_2')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_route_ids(schedule, service):
service.reindex('different_service')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_id_throws_error(schedule, service):
with pytest.raises(ServiceIndexError) as e:
schedule.add_service(service)
assert 'already exists' in str(e.value)
def test_adding_service_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service', 'some_id'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
s = Service(id='some_id', routes=[r])
schedule.add_service(s, force=True)
assert_semantically_equal(dict(s.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(s.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'some_id', 'service'}})
assert_semantically_equal(s.graph()['2']['5'], {'routes': {'3'}, 'services': {'some_id'}})
def test_adding_service_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_service(Service(id='some_id', routes=[r]))
assert 'The following stops would inherit data' in str(e.value)
def test_removing_service(schedule):
schedule.remove_service('service')
assert not set(schedule.route_ids())
assert not set(schedule.service_ids())
assert not schedule._graph.graph['route_to_service_map']
assert not schedule._graph.graph['service_to_route_map']
def test_adding_route(schedule, route):
route.reindex('new_id')
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'new_id'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'new_id': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'new_id']})
def test_adding_route_with_clashing_id(schedule, route):
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'service_3'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'service_3': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'service_3']})
def test_adding_route_to_non_existing_service_throws_error(schedule, route):
with pytest.raises(ServiceIndexError) as e:
schedule.add_route('service_that_doesnt_exist', route)
assert 'does not exist' in str(e.value)
def test_creating_a_route_to_add_using_id_references_to_existing_stops_inherits_schedule_stops_data(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['1', '2', '5']
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}}, '2': {'routes': {'3'}}, '5': {'routes': {'3'}}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_creating_a_route_to_add_giving_existing_schedule_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[schedule.stop('1'), schedule.stop('2'), schedule.stop('5')]
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'routes': {'3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'5': {'routes': {'3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
schedule.add_route('service', r, force=True)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_only_flags_those_that_are_actually_different(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='')]
)
assert r.ordered_stops == ['1', '2', '5']
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert "The following stops would inherit data currently stored under those Stop IDs in the Schedule: " \
"['1', '2']" in str(e.value)
def test_adding_route_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert 'The following stops would inherit data' in str(e.value)
def test_extracting_epsg_from_an_intermediate_route_gives_none():
# intermediate meaning not belonging to a schedule yet but referring to stops in a schedule
r = Route(
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['S1', 'S2', 'S3']
)
assert r.epsg is None
def test_removing_route(schedule):
schedule.remove_route('2')
assert set(schedule.route_ids()) == {'1'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1']})
def test_removing_route_updates_services_on_nodes_and_edges(schedule):
schedule.remove_route('2')
assert_semantically_equal(dict(schedule.graph().nodes(data=True)),
{'5': {'services': set(), 'routes': set(), 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set()},
'6': {'services': set(), 'routes': set(), 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set()},
'7': {'services': set(), 'routes': set(), 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set()},
'8': {'services': set(), 'routes': set(), 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76683608549253,
'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76682779861249,
'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766825803756994,
'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766856648946295,
'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()}})
assert_semantically_equal(schedule.graph().edges(data=True)._adjdict,
{'5': {'6': {'services': set(), 'routes': set()}},
'6': {'7': {'services': set(), 'routes': set()}},
'7': {'8': {'services': set(), 'routes': set()}}, '8': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}, '4': {}})
def test_removing_stop(schedule):
schedule.remove_stop('5')
assert {stop.id for stop in schedule.stops()} == {'1', '3', '4', '7', '8', '6', '2'}
def test_removing_unused_stops(schedule):
schedule.remove_route('1')
schedule.remove_unsused_stops()
assert {stop.id for stop in schedule.stops()} == {'6', '8', '5', '7'}
def test_iter_stops_returns_stops_objects(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4326')
assert set([stop.id for stop in schedule.stops()]) == {'0', '1', '2', '3', '4'}
assert all([isinstance(stop, Stop) for stop in schedule.stops()])
def test_read_matsim_schedule_returns_expected_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
epsg='epsg:27700')
correct_services = Service(id='10314', routes=[
Route(
route_short_name='12', id='VJbd8660f05fe6f744e58a66ae12bd66acbca88b98',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_0_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])
for val in schedule.services():
assert val == correct_services
assert_semantically_equal(schedule.stop_to_service_ids_map(),
{'26997928P.link:1': {'10314'}, '26997928P': {'10314'}})
assert_semantically_equal(schedule.stop_to_route_ids_map(),
{'26997928P': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'26997928P.link:1': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'}})
assert_semantically_equal(schedule.route('VJbd8660f05fe6f744e58a66ae12bd66acbca88b98').trips,
{'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'], 'vehicle_id': ['veh_0_bus']})
assert_semantically_equal(
dict(schedule.graph().nodes(data=True)),
{'26997928P': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P', 'x': 528464.1342843144, 'y': 182179.7435136598, 'epsg': 'epsg:27700',
                       'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373, 'lon': -0.14967658860132668,
's2_id': 5221390302759871369, 'additional_attributes': {'name', 'isBlocking'},
'isBlocking': 'false'},
'26997928P.link:1': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P.link:1', 'x': 528464.1342843144, 'y': 182179.7435136598,
'epsg': 'epsg:27700', 'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373,
'lon': -0.14967658860132668, 's2_id': 5221390302759871369,
'additional_attributes': {'name', 'linkRefId', 'isBlocking'}, 'linkRefId': '1',
'isBlocking': 'false'}}
)
def test_reading_vehicles_with_a_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_reading_vehicles_after_reading_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_is_strongly_connected_with_strongly_connected_schedule(strongly_connected_schedule):
assert strongly_connected_schedule.is_strongly_connected()
def test_is_strongly_connected_with_not_strongly_connected_schedule(schedule):
assert not schedule.is_strongly_connected()
def test_has_self_loops_with_self_has_self_looping_schedule(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
assert s.has_self_loops()
def test_has_self_loops_returns_self_looping_stops(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
loop_nodes = s.has_self_loops()
assert loop_nodes == ['1']
def test_has_self_loops_with_non_looping_routes(schedule):
assert not schedule.has_self_loops()
def test_validity_of_services(self_looping_route, route):
s = Schedule('epsg:27700', [Service(id='1', routes=[self_looping_route]),
Service(id='2', routes=[route])])
assert not s['1'].is_valid_service()
assert s['2'].is_valid_service()
assert set(s.validity_of_services()) == {False, True}
def test_has_valid_services(schedule):
assert not schedule.has_valid_services()
def test_has_valid_services_with_only_valid_services(service):
s = Schedule('epsg:27700', [service])
assert s.has_valid_services()
def test_invalid_services_shows_invalid_services(service):
for route_id in service.route_ids():
service._graph.graph['routes'][route_id]['route'] = ['1']
s = Schedule('epsg:27700', [service])
assert s.invalid_services() == [service]
def test_is_valid_with_valid_schedule(service):
s = Schedule('epsg:27700', [service])
assert s.is_valid_schedule()
def test_generate_validation_report_delegates_to_method_in_schedule_operations(mocker, schedule):
mocker.patch.object(schedule_validation, 'generate_validation_report')
schedule.generate_validation_report()
schedule_validation.generate_validation_report.assert_called_once()
def test_build_graph_builds_correct_graph(strongly_connected_schedule):
g = strongly_connected_schedule.graph()
assert_semantically_equal(dict(g.nodes(data=True)),
{'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_5'},
'2': {'services': {'service'}, 'routes': {'1', '2'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set(), 'name': 'Stop_2'},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_7'},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_8'},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_3'},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_1'},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_4'}})
assert_semantically_equal(g.edges(data=True)._adjdict,
{'5': {'2': {'services': {'service'}, 'routes': {'2'}}},
'2': {'7': {'services': {'service'}, 'routes': {'2'}},
'3': {'services': {'service'}, 'routes': {'1'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}},
'8': {'5': {'services': {'service'}, 'routes': {'2'}}},
'4': {'1': {'services': {'service'}, 'routes': {'1'}}},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}}})
def test_building_trips_dataframe(schedule):
df = schedule.route_trips_with_stops_to_dataframe()
correct_df = DataFrame({'departure_time': {0: Timestamp('1970-01-01 13:00:00'), 1: Timestamp('1970-01-01 13:05:00'),
2: Timestamp('1970-01-01 13:09:00'), 3: Timestamp('1970-01-01 13:30:00'),
4: Timestamp('1970-01-01 13:35:00'), 5: Timestamp('1970-01-01 13:39:00'),
6: Timestamp('1970-01-01 11:00:00'), 7: Timestamp('1970-01-01 11:05:00'),
8: | Timestamp('1970-01-01 11:09:00') | pandas.Timestamp |
# -*- coding: utf-8 -*-
"""Constants and functions in common across modules."""
# standard library imports
import contextlib
import mmap
import os
import sys
import tempfile
from pathlib import Path
# third-party imports
import numpy as np
import pandas as pd
import xxhash
from loguru import logger as loguru_logger
from memory_tempfile import MemoryTempfile
# global constants
NAME = "azulejo"
DEFAULT_PARQUET_COMPRESSION = "ZSTD"
# Sizes and minimum read times with various compressions
# for a file with one proteome on a system with M.2 SSD disk
# under pyarrow 1.0.0 into pandas 1.1.0:
# "NONE": 43MB, 1.8s
# "ZSTD": 13M, 1.8s
# "SNAPPY": 29 MB, 1.8s
# "BROTLI": 13 MB, 1.9s
# "LZ4": 23MB, (disabled under pyarrow 1.0.0, was about like brotli under 0.17)
# "GZIP": 14 MB, 2.1 s
# "LZO": not supported
# "BZ2": not supported
# In addition, the ingest process took 28.8s with None, and
# 28.4 s with ZSTD, probably due to writing less data.
# With its 70% compression factor, ZSTD can be expected to
# perform even better relative to uncompressed and snappy
# on production systems with slower disks for which
# cache is not warmed up (as mine was in this test).
# So ZSTD seems a clear choice for now.
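# Illustrative sketch only: one way to reproduce the size comparison above on
# a frame of your own. The codec list and the temporary-directory usage are
# assumptions for this example, not part of the azulejo pipeline.
def _compare_parquet_codecs(frame, codecs=("NONE", "ZSTD", "SNAPPY", "BROTLI", "GZIP")):
    """Write frame once per codec and return the resulting file sizes in MB."""
    sizes = {}
    with tempfile.TemporaryDirectory() as tmpdir:
        for codec in codecs:
            outpath = Path(tmpdir) / f"frame.{codec.lower()}.parquet"
            # pandas/pyarrow treat compression=None as "uncompressed"
            frame.to_parquet(outpath, compression=None if codec == "NONE" else codec)
            sizes[codec] = outpath.stat().st_size / (1024.0 * 1024.0)
    return sizes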
PARQUET_EXTENSIONS = ["parquet", "pq", "parq"]
TSV_EXTENSIONS = ["tsv"]
SAVED_INPUT_FILE = "input.toml"
# Changing the extension of these files will change the type of file written.
# TSV files, though readable/editable, do not give the written values back.
# Parquet is also ~100X faster.
CLUSTER_FILETYPE = "parq"
CLUSTERS_FILE = "homology_clusters.parq"
CLUSTERSYN_FILE = "homology_clusters.syn.parq"
CLUSTER_HIST_FILE = "homology_cluster_hist.tsv"
FRAGMENTS_FILE = "fragments.tsv"
ANCHOR_HIST_FILE = "anchor_hist.tsv"
HOMOLOGY_FILE = "proteins.hom.parq"
PROTEOMES_FILE = "proteomes.tsv"
PROTEOMOLOGY_FILE = "proteomes.hom.parq"
PROTEOSYN_FILE = "proteomes.hom.syn.parq"
PROTEINS_FILE = "proteins.parq"
SYNTENY_FILE = "proteins.hom.syn.parq"
ANCHORS_FILE = "synteny_anchors.tsv"
SYNTENY_FILETYPE = "tsv"
COLLECTION_FILE = "collection.json"
COLLECTION_HOM_FILE = "collection.hom.json"
COLLECTION_SYN_FILE = "collection.hom.syn.json"
EXTERNAL_CLUSTERS_FILE = "homology_clusters-external.tsv"
# fragment-name defs
PLASTID_STARTS = ["chromop", "chl", "mt", "mi", "rh", "mu", "le", "pl"]
CHROMOSOME_SYNONYMS = ["chromosome", "chrom", "chro", "gs", "gm"]
ALTERNATE_ABBREV = "alt"
CHROMOSOME_ABBREV = "chr"
SCAFFOLD_SYNONYMS = ["scaffold", "scaf", "sca"]
SCAFFOLD_ABBREV = "sc"
# synteny codes
UNAMBIGUOUS_CODE = "U"
DISAMBIGUATED_CODE = "D"
INDIRECT_CODE = "I"
LOCALLY_UNAMBIGUOUS_CODE = "L"
NON_AMBIGUOUS_CODE = "N"
AMBIGUOUS_CODE = "A"
CODE_DICT = {
UNAMBIGUOUS_CODE: "unambiguous",
DISAMBIGUATED_CODE: "disambiguated",
INDIRECT_CODE: "indirectly unambiguous",
LOCALLY_UNAMBIGUOUS_CODE: "locally unambiguous",
NON_AMBIGUOUS_CODE: "non-ambiguous",
AMBIGUOUS_CODE: "ambiguous",
}
DIRECTIONAL_CATEGORY = pd.CategoricalDtype(categories=["-", "+"])
YES_NO = pd.CategoricalDtype(categories=["y", "n"])
SYNTENY_CATEGORY = pd.CategoricalDtype(categories=CODE_DICT.keys())
DEFAULT_DTYPE = pd.UInt32Dtype()
NONDEFAULT_DTYPES = {
"anchor.subframe.ok": pd.BooleanDtype(),
"code": SYNTENY_CATEGORY,
"fasta_url": pd.StringDtype(),
"gff_url": pd.StringDtype(),
"frag.direction": DIRECTIONAL_CATEGORY,
"frag.id": pd.CategoricalDtype(),
"frag.is_chr": YES_NO,
"frag.is_plas": YES_NO,
"frag.is_scaf": YES_NO,
"frag.len": pd.UInt64Dtype(),
"frag.orig_id": pd.StringDtype(),
"frag.start": pd.UInt64Dtype(),
"gff.feature": pd.CategoricalDtype(),
"gff.id": pd.CategoricalDtype(),
"path": pd.CategoricalDtype(),
"phy.*": pd.CategoricalDtype(),
"preference": pd.StringDtype(),
"prot.m_start": pd.BooleanDtype(),
"prot.no_stop": pd.BooleanDtype(),
"prot.seq": pd.StringDtype(),
"syn.anchor.direction": DIRECTIONAL_CATEGORY,
"syn.code": SYNTENY_CATEGORY,
"syn.orthogenomic_pct": "float64",
"syn.hash.footprint": pd.UInt32Dtype(),
"syn.hash.direction": DIRECTIONAL_CATEGORY,
"val": "float64",
# patterns are matched in order after checking for exact matches
"patterns": [
{"start": "phy.", "type": pd.CategoricalDtype()},
{"start": "pct_", "end": "_pct", "type": "float64"},
{"start": "memb", "type": pd.StringDtype()},
],
}
MEGABYTES = 1024.0 * 1024.0
INSTALL_ENVIRON_VAR = ( # installs go into "/bin" and other subdirs of this directory
NAME.upper() + "_INSTALL_DIR"
)
if INSTALL_ENVIRON_VAR in os.environ:
INSTALL_PATH = Path(os.environ[INSTALL_ENVIRON_VAR])
else:
INSTALL_PATH = Path(sys.executable).parent.parent
# logger class for use in testing
class PrintLogger:
"""This logger only prints, for testing only."""
def __init__(self, level):
try:
self.level = int(level)
except ValueError:
if level.lower() == "debug":
self.level = 10
elif level.lower() == "info":
self.level = 20
elif level.lower() == "warning":
self.level = 30
elif level.lower() == "error":
self.level = 40
else:
self.level = 20
def debug(self, message):
if self.level <= 10:
print(f"Debug: {message}")
def info(self, message):
if self.level <= 20:
print(message)
def warning(self, message):
if self.level <= 30:
print(f"Warning: {message}")
def error(self, message):
if self.level <= 40:
print(f"ERROR: {message}")
def is_writable(dev):
"""Returns whether a device is writable or not."""
try:
testdir = tempfile.mkdtemp(prefix=NAME + "-", dir=append_slash(dev))
Path(testdir).rmdir()
except OSError:
return False
return True
# global variables that are set depending on envvars
# Don't use loguru, just print. Useful for testing.
if "LOG_TO_PRINT" in os.environ:
logger = PrintLogger(os.environ["LOG_TO_PRINT"])
else:
logger = loguru_logger
# Update period on spinners. Also useful for testing.
if "SPINNER_UPDATE_PERIOD" in os.environ:
try:
SPINNER_UPDATE_PERIOD = float(os.environ["SPINNER_UPDATE_PERIOD"])
except ValueError:
SPINNER_UPDATE_PERIOD = 5.0
else:
SPINNER_UPDATE_PERIOD = 1.0
# Fast scratch disk (e.g., SSD or /dev/shm), if other than /tmp
if "SCRATCH_DEV" in os.environ and is_writable(os.environ["SCRATCH_DEV"]):
SCRATCH_DEV = os.environ["SCRATCH_DEV"]
else:
SCRATCH_DEV = "/tmp"
# Build disk for installer, needs to allow exe bit set
if "BUILD_DEV" in os.environ and is_writable(os.environ["BUILD_DEV"]):
BUILD_DEV = os.environ["BUILD_DEV"]
elif sys.platform == "linux":
try:
BUILD_DEV = MemoryTempfile(
preferred_paths=["/run/user/{uid}"], fallback=True
).get_usable_mem_tempdir_paths()[0]
except AttributeError:
BUILD_DEV = "/tmp"
else:
BUILD_DEV = "/tmp"
def enforce_canonical_dtypes(frame):
"""Enforce that dtypes of columns meet expectations."""
for col in frame.columns:
if col.startswith("tmp."):
continue
column_type = frame[col].dtype
should_be_type = DEFAULT_DTYPE
if col in NONDEFAULT_DTYPES:
should_be_type = NONDEFAULT_DTYPES[col]
else:
for pattern_dict in NONDEFAULT_DTYPES["patterns"]:
if "start" in pattern_dict:
if col.startswith(pattern_dict["start"]):
should_be_type = pattern_dict["type"]
break
if "end" in pattern_dict:
if col.endswith(pattern_dict["end"]):
should_be_type = pattern_dict["type"]
break
try:
is_correct_type = column_type == should_be_type
except TypeError:
is_correct_type = False
if not is_correct_type:
try:
frame[col] = frame[col].astype(should_be_type)
except (ValueError, TypeError) as cast_err:
logger.warning(f"Cannot cast {col} to {should_be_type}")
logger.warning(cast_err)
return frame
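# Illustrative sketch only: how the exact-match and pattern rules in
# NONDEFAULT_DTYPES resolve in practice. The column names below are made up
# for the example; "frag.len" hits an exact entry, "phy.genus" matches the
# "phy." prefix pattern, and "hom.cluster" falls back to DEFAULT_DTYPE.
def _dtype_resolution_example():
    """Return a small demo frame passed through enforce_canonical_dtypes."""
    demo = pd.DataFrame(
        {
            "frag.len": [1000, 2500],  # exact match -> UInt64
            "phy.genus": ["Glycine", "Zea"],  # "phy." prefix -> categorical
            "hom.cluster": [3, 7],  # no rule -> DEFAULT_DTYPE (UInt32)
        }
    )
    return enforce_canonical_dtypes(demo)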
def free_mb(dev):
""""Return the number of free MB on dev."""
fs_stats = os.statvfs(dev)
free_space_mb = int(
np.rint((fs_stats.f_bsize * fs_stats.f_bfree) / MEGABYTES)
)
return free_space_mb
def append_slash(dev):
"""Append a final slash, if needed."""
if not dev.endswith("/"):
dev += "/"
return dev
def disk_usage_mb(pathstr):
"""Calculate the size used in MB by a path."""
path = Path(pathstr)
if not path.exists():
logger.error(f"Path '{path}' does not exist for disk usage")
return 0
total_size = np.array(
[p.stat().st_size for p in path.rglob("*") if not p.is_symlink()]
).sum()
return int(np.rint(total_size / MEGABYTES))
class MinSpaceTracker:
"""Keep track of the minimum space available on a (memory) device."""
def __init__(self, device):
"""Remember the device and initialize minimum space."""
self.device = Path(device)
self.initial_space = free_mb(self.device)
self.min_space = self.initial_space
def check(self):
"""Update the minimimum space available."""
self.min_space = min(self.min_space, free_mb(self.device))
def report_min(self):
"""Report the minimum space available."""
return self.min_space
def report_used(self):
"""Report change from initial space."""
return self.initial_space - self.min_space
def cluster_set_name(stem, identity):
"""Get a setname that specifies the %identity value.."""
if identity == 1.0:
digits = "10000"
else:
digits = f"{identity:.4f}"[2:]
return f"{stem}-nr-{digits}"
def get_paths_from_file(filepath, must_exist=True):
"""Given a string filepath,, return the resolved path and parent."""
inpath = Path(filepath).expanduser().resolve()
if must_exist and not inpath.exists():
raise FileNotFoundError(filepath)
dirpath = inpath.parent
return inpath, dirpath
class TrimmableMemoryMap:
"""A memory-mapped file that can be resized at the end."""
def __init__(self, filepath, access=mmap.ACCESS_WRITE):
"""Open the memory-mapped file."""
self.orig_size = None
self.size = None
self.map_obj = None
self.access = access
self.filehandle = open(filepath, "r+b")
def trim(self, start, end):
"""Trim the memory map and mark the nex size."""
self.map_obj.move(start, end, self.orig_size - end)
self.size -= end - start
return self.size
@contextlib.contextmanager
def map(self):
"""Open a memory-mapped view of filepath."""
try:
self.map_obj = mmap.mmap(
self.filehandle.fileno(), 0, access=self.access
)
self.orig_size = self.map_obj.size()
self.size = self.orig_size
yield self.map_obj
finally:
if self.access == mmap.ACCESS_WRITE:
self.map_obj.flush()
self.map_obj.close()
self.filehandle.truncate(self.size)
self.filehandle.close()
else:
self.map_obj.close()
self.filehandle.close()
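# Illustrative sketch only: removing a byte range from the middle of a file
# with TrimmableMemoryMap. The offsets are hypothetical; trim() shifts the
# tail of the map left over [start, end) and the file is truncated when the
# map() context exits (write access is the class default).
def _drop_byte_range(filepath, start, end):
    """Remove bytes in [start, end) from the file at filepath, return new size."""
    memory_map = TrimmableMemoryMap(filepath)  # default access is ACCESS_WRITE
    with memory_map.map():
        new_size = memory_map.trim(start, end)
    return new_size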
def dotpath_to_path(dotpath):
"""Return a dot-separated pathstring as a path."""
return Path("/".join(dotpath.split(".")))
def fasta_records(filepath):
"""Count the number of records in a FASTA file."""
count = 0
next_pos = 0
angle_bracket = bytes(">", "utf-8")
memory_map = TrimmableMemoryMap(filepath, access=mmap.ACCESS_READ)
with memory_map.map() as mem_map:
size = memory_map.size
next_pos = mem_map.find(angle_bracket, next_pos)
while next_pos != -1 and next_pos < size:
count += 1
next_pos = mem_map.find(angle_bracket, next_pos + 1)
return count, size
def protein_file_stats_filename(setname):
"""Return the name of the protein stat file."""
if setname is None:
return "protein_files.tsv"
return f"{setname}-protein_files.tsv"
def protein_properties_filename(filestem):
"""Return the name of the protein properties file."""
if filestem is None:
return "proteins.tsv"
return f"{filestem}-proteins.tsv"
def homo_degree_dist_filename(filestem):
"""Return the name of the homology degree distribution file."""
return f"{filestem}-degreedist.tsv"
def group_key_filename(members):
"""Return the name of the group key file."""
return f"groupkeys-{members}.tsv"
def sort_proteome_frame(frame):
"""Sort a proteome frame by preference and frag.max and renumber."""
frame = frame.copy()
if frame.index.name == "path":
frame["path"] = frame.index
frame.sort_values(
by=["preference", "frag.max"], ascending=[True, False], inplace=True
)
frame["order"] = range(len(frame))
frame.set_index("order", inplace=True)
return frame
def remove_tmp_columns(frame):
"""Remove any columns in a data frame that begin with 'tmp.'."""
drop_cols = [col for col in frame.columns if col.startswith("tmp.")]
if len(drop_cols) != 0:
return frame.drop(drop_cols, axis=1)
return frame
def write_tsv_or_parquet(
frame,
filepath,
compression=DEFAULT_PARQUET_COMPRESSION,
float_format="%.2f",
desc=None,
remove_tmp=True,
sort_cols=True,
enforce_types=True,
):
"""Write either a TSV or a parquet file by file extension."""
filepath = Path(filepath)
ext = filepath.suffix.lstrip(".")
if desc is not None:
file_desc = f"{desc} file"
        logger.debug(f'Writing {file_desc} "{filepath}"')
if remove_tmp:
frame = remove_tmp_columns(frame)
if enforce_types:
frame = enforce_canonical_dtypes(frame)
if sort_cols:
frame = frame[sorted(frame.columns)]
if ext in PARQUET_EXTENSIONS:
frame.to_parquet(filepath, compression=compression)
elif ext in TSV_EXTENSIONS:
frame.to_csv(filepath, sep="\t", float_format=float_format)
else:
logger.error(f"Unrecognized file extension {ext} in {filepath}")
sys.exit(1)
def read_tsv_or_parquet(filepath):
"""Read either a TSV or a parquet file by file extension."""
filepath = Path(filepath)
if not filepath.exists():
logger.error(f'File "{filepath}" does not exist.')
sys.exit(1)
ext = filepath.suffix.lstrip(".")
if ext in PARQUET_EXTENSIONS:
return pd.read_parquet(filepath)
if ext in TSV_EXTENSIONS:
frame = pd.read_csv(filepath, sep="\t", index_col=0).convert_dtypes()
return enforce_canonical_dtypes(frame)
logger.error(f"Unrecognized file extensions {ext} in {filepath}")
sys.exit(1)
def log_and_add_to_stats(stats, new_stats):
"""Print stats info and write to stats file."""
with pd.option_context(
"display.max_rows",
None,
"display.max_columns",
None,
"display.float_format",
"{:,.1f}%".format,
):
logger.info(new_stats)
overlap_cols = list(set(stats.columns) & set(new_stats.columns))
return pd.concat([stats.drop(columns=overlap_cols), new_stats], axis=1)
def bool_to_y_or_n(bool_arr):
"""Convert boolean to T/F value"""
boolean_dict = {True: "y", False: "n"}
bool_ser = pd.Series(bool_arr)
return bool_ser.map(boolean_dict)
def y_or_n_to_bool(bool_ser):
"""Convert boolean to T/F value"""
tf_dict = {"y": True, False: "n"}
return bool_ser.map(tf_dict).astype(bool)
def hash_array(kmer):
"""Return a hash of a numpy array."""
return xxhash.xxh32_intdigest(kmer.tobytes())
def calculate_adjacency_group(index_series, frag_series):
"""Calculate an adjacency group numger."""
index_fr = pd.DataFrame({"index": index_series, "fragment": frag_series})
n_prot = len(index_fr)
adj_gr_count = 0
was_adj = False
index_fr["i"] = range(n_prot)
adj_group = np.array([np.nan] * n_prot)
for unused_group, subframe in index_fr.groupby(by=["fragment"]):
if len(subframe) == 1:
continue
last_pos = -2
last_row = None
if was_adj:
adj_gr_count += 1
was_adj = False
for unused_i, row in subframe.iterrows():
row_no = row["i"]
if row["index"] == last_pos + 1:
if not was_adj:
adj_group[last_row] = adj_gr_count
was_adj = True
adj_group[row_no] = adj_gr_count
else:
if was_adj:
adj_gr_count += 1
was_adj = False
last_pos = row["index"]
last_row = row_no
if was_adj:
adj_gr_count += 1
adj_arr = pd.Series(
adj_group, dtype= | pd.UInt32Dtype() | pandas.UInt32Dtype |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import cv2
import logging
project_path = '/Downloads/GL'
def load_train_test_data():
pathToTrainData = 'Dataset/Car Images/Train Images'
cars_train_data = load_data(pathToTrainData)
logging.info("cars_train_data loaded and built successfully")
cars_train_data.to_csv(r'references/cars_train_data.csv', index=False)
logging.info("cars_train_data saved successfully")
print(cars_train_data.head())
print(cars_train_data.info())
pathToTestData = 'Dataset/Car Images/Test Images'
cars_test_data = load_data(pathToTestData)
print(cars_test_data.head())
print(cars_test_data.info())
logging.info("cars_test_data loaded and built successfully")
cars_test_data.to_csv(r'references/cars_test_data.csv', index=False)
logging.info("cars_test_data saved successfully")
cars_train_data.sort_values(['imageName'],axis=0,ascending=[True],inplace=True)
cars_test_data.sort_values(['imageName'],axis=0,ascending=[True],inplace=True)
logging.info("cars train and test data sorted successfully")
logging.info('Renaming imageName to match to Annotations Data Set')
cars_train_data.rename(columns = {'imageName': 'Image Name'},inplace = True)
cars_test_data.rename(columns = {'imageName': 'Image Name'},inplace = True)
print(cars_train_data.head())
print(cars_test_data.head())
return cars_train_data, cars_test_data
def load_data(pathToData):
path = os.getcwd()
print(path)
# os.chdir(project_path)
# print(os.getcwd())
# Importing the data set
data = pd.DataFrame(columns=['imageName', 'imagePath', 'class', 'height', 'width'])
for dirname, _, filenames in os.walk(pathToData):
for filename in filenames:
path = os.path.join(dirname, filename)
img_name = os.path.split(path)[1]
if img_name != '.DS_Store':
img = cv2.imread(path)
height, width, channel = img.shape
class_label = dirname.split('/')[-1]
data = data.append(
{'imageName': img_name, 'imagePath': path, 'class': class_label, 'height': height, 'width': width},
ignore_index=True)
logging.info("Data loaded and built successfully")
return data
def load_train_test_annotations():
pathToAnotations ='Dataset/Annotations'
cars_train_annotations = | pd.read_csv(pathToAnotations+'/Train Annotations.csv') | pandas.read_csv |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import Series, date_range
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestTruncate:
def test_truncate(self, datetime_series):
offset = BDay()
ts = datetime_series[::3]
start, end = datetime_series.index[3], datetime_series.index[6]
start_missing, end_missing = datetime_series.index[2], datetime_series.index[7]
# neither specified
truncated = ts.truncate()
tm.assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
tm.assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
tm.assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
tm.assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
tm.assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
tm.assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
tm.assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=datetime_series.index[0] - offset)
assert len(truncated) == 0
truncated = ts.truncate(before=datetime_series.index[-1] + offset)
assert len(truncated) == 0
msg = "Truncate: 1999-12-31 00:00:00 must be after 2000-02-14 00:00:00"
with pytest.raises(ValueError, match=msg):
ts.truncate(
before=datetime_series.index[-1] + offset,
after=datetime_series.index[0] - offset,
)
def test_truncate_nonsortedindex(self):
# GH#17935
s = pd.Series(["a", "b", "c", "d", "e"], index=[5, 3, 2, 9, 0])
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
s.truncate(before=3, after=9)
rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
ts = pd.Series(np.random.randn(len(rng)), index=rng)
msg = "truncate requires a sorted index"
with pytest.raises(ValueError, match=msg):
ts.sort_values(ascending=False).truncate(before="2011-11", after="2011-12")
@pytest.mark.parametrize(
"before, after, indices",
[(1, 2, [2, 1]), (None, 2, [2, 1, 0]), (1, None, [3, 2, 1])],
)
@pytest.mark.parametrize("klass", [pd.Int64Index, pd.DatetimeIndex])
def test_truncate_decreasing_index(self, before, after, indices, klass):
# https://github.com/pandas-dev/pandas/issues/33756
idx = klass([3, 2, 1, 0])
if klass is pd.DatetimeIndex:
before = pd.Timestamp(before) if before is not None else None
after = pd.Timestamp(after) if after is not None else None
indices = [pd.Timestamp(i) for i in indices]
values = pd.Series(range(len(idx)), index=idx)
result = values.truncate(before=before, after=after)
expected = values.loc[indices]
tm.assert_series_equal(result, expected)
def test_truncate_datetimeindex_tz(self):
# GH 9243
idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific")
s = Series(range(len(idx)), index=idx)
result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4))
expected = Series([1, 2, 3], index=idx[1:4])
tm.assert_series_equal(result, expected)
def test_truncate_periodindex(self):
# GH 17717
idx1 = pd.PeriodIndex(
[pd.Period("2017-09-02"), pd.Period("2017-09-02"), pd.Period("2017-09-03")]
)
series1 = pd.Series([1, 2, 3], index=idx1)
result1 = series1.truncate(after="2017-09-02")
expected_idx1 = pd.PeriodIndex(
[pd.Period("2017-09-02"), pd.Period("2017-09-02")]
)
tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1))
idx2 = pd.PeriodIndex(
[pd.Period("2017-09-03"), pd.Period("2017-09-02"), pd.Period("2017-09-03")]
)
series2 = | pd.Series([1, 2, 3], index=idx2) | pandas.Series |
import datetime
import os
from concurrent.futures import ProcessPoolExecutor
from math import ceil
import pandas as pd
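# NOTE: `date_null` is used throughout as the sentinel for missing dates. Its definition is
# not shown in this excerpt, so the value below is an assumed placeholder for completeness.
date_null = pd.to_datetime('1970-01-01', format='%Y-%m-%d')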
# In[] Load the source data
def get_source_data():
    # Path to the source data
DataPath = 'data/'
    # Read the source CSV files
off_train = pd.read_csv(os.path.join(DataPath, 'ccf_offline_stage1_train.csv'),
parse_dates=['Date_received', 'Date'])
off_train.columns = ['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received', 'Date']
on_train = pd.read_csv(os.path.join(DataPath, 'ccf_online_stage1_train.csv'), parse_dates=['Date_received', 'Date'])
on_train.columns = ['User_id', 'Merchant_id', 'Action', 'Coupon_id', 'Discount_rate', 'Date_received', 'Date']
off_test = pd.read_csv(os.path.join(DataPath, 'ccf_offline_stage1_test_revised.csv'), parse_dates=['Date_received'])
off_test.columns = ['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance', 'Date_received']
print(off_train.info())
print(off_train.head(5))
return off_train, on_train, off_test
# In[] Special handling of null/NA values
def null_process_offline(dataset, predict=False):
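    # Fill missing values with sentinels (Distance=11, Coupon_id=0, dates=date_null) and
    # convert "X:Y" full-reduction strings into a numeric discount_rate column.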
dataset.Distance.fillna(11, inplace=True)
dataset.Distance = dataset.Distance.astype(int)
dataset.Coupon_id.fillna(0, inplace=True)
dataset.Coupon_id = dataset.Coupon_id.astype(int)
dataset.Date_received.fillna(date_null, inplace=True)
dataset[['discount_rate_x', 'discount_rate_y']] = dataset[dataset.Discount_rate.str.contains(':') == True][
'Discount_rate'].str.split(':', expand=True).astype(int)
dataset['discount_rate'] = 1 - dataset.discount_rate_y / dataset.discount_rate_x
dataset.discount_rate = dataset.discount_rate.fillna(dataset.Discount_rate).astype(float)
if predict:
return dataset
else:
dataset.Date.fillna(date_null, inplace=True)
return dataset
def null_process_online(dataset):
dataset.Coupon_id.fillna(0, inplace=True)
# online.Coupon_id = online.Coupon_id.astype(int)
dataset.Date_received.fillna(date_null, inplace=True)
dataset.Date.fillna(date_null, inplace=True)
return dataset
# In[] Build the cross-validation training sets
def data_process(off_train, on_train, off_test):
# train feature split
    # Training set 1: coupons received between 2016-04-16 and 2016-05-15
time_range = ['2016-04-16', '2016-05-15']
dataset1 = off_train[(off_train.Date_received >= time_range[0]) & (off_train.Date_received <= time_range[1])].copy()
dataset1['label'] = 0
dataset1.loc[
(dataset1.Date != date_null) & (dataset1.Date - dataset1.Date_received <= datetime.timedelta(15)), 'label'] = 1
    # Offline features for set 1: purchase dates between 2016-01-01 and 2016-04-15, or coupon-free records received 2016-01-01..2016-03-31
time_range_date_received = ['2016-01-01', '2016-03-31']
time_range_date = ['2016-01-01', '2016-04-15']
feature1_off = off_train[(off_train.Date >= time_range_date[0]) & (off_train.Date <= time_range_date[1]) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range_date_received[0]) & (
off_train.Date_received <= time_range_date_received[1]))]
    # Online features for set 1: the same date windows applied to the online data [on_train.date == 'null' replaced by on_train.Coupon_id == 0]
feature1_on = on_train[(on_train.Date >= time_range_date[0]) & (on_train.Date <= time_range_date[1]) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range_date_received[0]) & (
on_train.Date_received <= time_range_date_received[1]))]
    # Training set 2: coupons received between 2016-05-16 and 2016-06-15
time_range = ['2016-05-16', '2016-06-15']
    dataset2 = off_train[(off_train.Date_received >= time_range[0]) & (off_train.Date_received <= time_range[1])].copy()
dataset2['label'] = 0
dataset2.loc[
(dataset2.Date != date_null) & (dataset2.Date - dataset2.Date_received <= datetime.timedelta(15)), 'label'] = 1
    # Offline features for set 2: purchase dates between 2016-02-01 and 2016-05-15, or coupon-free records received 2016-02-01..2016-04-30
time_range_date_received = ['2016-02-01', '2016-04-30']
time_range_date = ['2016-02-01', '2016-05-15']
feature2_off = off_train[(off_train.Date >= time_range_date[0]) & (off_train.Date <= time_range_date[1]) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range_date_received[0]) & (
off_train.Date_received <= time_range_date_received[1]))]
    # Online features for set 2: the same date windows applied to the online data
feature2_on = on_train[(on_train.Date >= time_range_date[0]) & (on_train.Date <= time_range_date[1]) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range_date_received[0]) & (
on_train.Date_received <= time_range_date_received[1]))]
    # Test set
dataset3 = off_test
    # Offline features for the test set: purchase or coupon receipt dates between 2016-03-16 and 2016-06-30
time_range = ['2016-03-16', '2016-06-30']
feature3_off = off_train[((off_train.Date >= time_range[0]) & (off_train.Date <= time_range[1])) | (
(off_train.Coupon_id == 0) & (off_train.Date_received >= time_range[0]) & (
off_train.Date_received <= time_range[1]))]
    # Online features for the test set: purchase or coupon receipt dates between 2016-03-16 and 2016-06-30
feature3_on = on_train[((on_train.Date >= time_range[0]) & (on_train.Date <= time_range[1])) | (
(on_train.Coupon_id == 0) & (on_train.Date_received >= time_range[0]) & (
on_train.Date_received <= time_range[1]))]
# get train feature
ProcessDataSet1 = get_features(dataset1, feature1_off, feature1_on)
ProcessDataSet2 = get_features(dataset2, feature2_off, feature2_on)
ProcessDataSet3 = get_features(dataset3, feature3_off, feature3_on)
return ProcessDataSet1, ProcessDataSet2, ProcessDataSet3
def get_features(dataset, feature_off, feature_on):
dataset = get_offline_features(dataset, feature_off)
return get_online_features(feature_on, dataset)
# In[] Define the feature-extraction functions
def get_offline_features(X, offline):
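    # Each feature below follows the same pattern: aggregate the historical window
    # (groupby on User_id, Coupon_id, Discount_rate, ...), then left-merge the result onto X.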
# X = X[:1000]
print(len(X), len(X.columns))
temp = offline[offline.Coupon_id != 0]
coupon_consume = temp[temp.Date != date_null]
coupon_no_consume = temp[temp.Date == date_null]
user_coupon_consume = coupon_consume.groupby('User_id')
X['weekday'] = X.Date_received.dt.weekday
X['day'] = X.Date_received.dt.day
    # # Number of coupon redemptions per distance
    # temp = coupon_consume.groupby('Distance').size().reset_index(name='distance_0')
    # X = pd.merge(X, temp, how='left', on='Distance')
    #
    # # Number of unredeemed coupons per distance
    # temp = coupon_no_consume.groupby('Distance').size().reset_index(name='distance_1')
    # X = pd.merge(X, temp, how='left', on='Distance')
    #
    # # Number of coupons received per distance
    # X['distance_2'] = X.distance_0 + X.distance_1
    #
    # # Coupon redemption rate per distance
    # X['distance_3'] = X.distance_0 / X.distance_2
    # temp = coupon_consume[coupon_consume.Distance != 11].groupby('Distance').size()
    # temp['d4'] = temp.Distance.sum() / len(temp)
    # X = pd.merge(X, temp, how='left', on='Distance')
'''user features'''
    # Number of coupon redemptions
temp = user_coupon_consume.size().reset_index(name='u2')
X = pd.merge(X, temp, how='left', on='User_id')
# X.u2.fillna(0, inplace=True)
# X.u2 = X.u2.astype(int)
    # Number of coupons received but not redeemed
temp = coupon_no_consume.groupby('User_id').size().reset_index(name='u3')
X = pd.merge(X, temp, how='left', on='User_id')
    # Ratio of redeemed to unredeemed coupons
X['u19'] = X.u2 / X.u3
    # Number of coupons received
X['u1'] = X.u2.fillna(0) + X.u3.fillna(0)
    # Coupon redemption rate
X['u4'] = X.u2 / X.u1
    # Number of purchases made without a coupon
temp = offline[(offline.Coupon_id == 0) & (offline.Date != date_null)]
temp1 = temp.groupby('User_id').size().reset_index(name='u5')
X = pd.merge(X, temp1, how='left', on='User_id')
    # Total number of purchases
X['u25'] = X.u2 + X.u5
    # Share of all purchases that used a coupon
X['u20'] = X.u2 / X.u25
    # Average interval (days) between ordinary purchases
temp = pd.merge(temp, temp.groupby('User_id').Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('User_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('User_id').size().reset_index(name='len'))
temp['u6'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('User_id')
X = pd.merge(X, temp[['User_id', 'u6']], how='left', on='User_id')
    # Average interval (days) between coupon purchases
temp = pd.merge(coupon_consume, user_coupon_consume.Date.max().reset_index(name='max'))
temp = pd.merge(temp, temp.groupby('User_id').Date.min().reset_index(name='min'))
temp = pd.merge(temp, temp.groupby('User_id').size().reset_index(name='len'))
temp['u7'] = ((temp['max'] - temp['min']).dt.days / (temp['len'] - 1))
temp = temp.drop_duplicates('User_id')
X = pd.merge(X, temp[['User_id', 'u7']], how='left', on='User_id')
    # Average number of ordinary purchases within 15 days
X['u8'] = X.u6 / 15
    # Average number of coupon purchases within 15 days
X['u9'] = X.u7 / 15
    # Average gap (days) between receiving a coupon and using it
temp = coupon_consume.copy()
temp['days'] = (temp.Date - temp.Date_received).dt.days
temp = (temp.groupby('User_id').days.sum() / temp.groupby('User_id').size()).reset_index(name='u10')
X = pd.merge(X, temp, how='left', on='User_id')
    # Average receipt-to-use gap expressed as a fraction of the 15-day window
X['u11'] = X.u10 / 15
    # Number of coupons redeemed within 15 days of receipt
temp = coupon_consume.copy()
temp['days'] = (temp.Date - temp.Date_received).dt.days
temp = temp[temp.days <= 15]
temp = temp.groupby('User_id').size().reset_index(name='u21')
X = pd.merge(X, temp, how='left', on='User_id')
    # Coupons redeemed within 15 days divided by total coupon redemptions
X['u22'] = X.u21 / X.u2
    # Coupons redeemed within 15 days divided by coupons received but never used
X['u23'] = X.u21 / X.u3
    # Coupons redeemed within 15 days divided by total coupons received
X['u24'] = X.u21 / X.u1
    # Average discount rate of redeemed coupons
temp = user_coupon_consume.discount_rate.mean().reset_index(name='u45')
X = pd.merge(X, temp, how='left', on='User_id')
    # Lowest discount rate among the user's redeemed coupons
temp = user_coupon_consume.discount_rate.min().reset_index(name='u27')
X = pd.merge(X, temp, how='left', on='User_id')
    # Highest discount rate among the user's redeemed coupons
temp = user_coupon_consume.discount_rate.max().reset_index(name='u28')
X = pd.merge(X, temp, how='left', on='User_id')
    # Number of distinct coupons the user has redeemed
temp = coupon_consume.groupby(['User_id', 'Coupon_id']).size()
temp = temp.groupby('User_id').size().reset_index(name='u32')
X = pd.merge(X, temp, how='left', on='User_id')
    # Number of distinct coupons the user has received
temp = offline[offline.Date_received != date_null]
temp = temp.groupby(['User_id', 'Coupon_id']).size().reset_index(name='u47')
X = pd.merge(X, temp, how='left', on=['User_id', 'Coupon_id'])
    # Share of the user's distinct coupons that were redeemed
X['u33'] = X.u32 / X.u47
    # Average number of redemptions per distinct coupon
X['u34'] = X.u2 / X.u47
    # Average user-merchant distance for coupon redemptions
temp = offline[(offline.Coupon_id != 0) & (offline.Date != date_null) & (offline.Distance != 11)]
temp = temp.groupby('User_id').Distance
temp = pd.merge(temp.count().reset_index(name='x'), temp.sum().reset_index(name='y'), on='User_id')
temp['u35'] = temp.y / temp.x
temp = temp[['User_id', 'u35']]
X = pd.merge(X, temp, how='left', on='User_id')
    # Minimum user-merchant distance among coupon redemptions
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('User_id').Distance.min().reset_index(name='u36')
X = pd.merge(X, temp, how='left', on='User_id')
    # Maximum user-merchant distance among coupon redemptions
temp = coupon_consume[coupon_consume.Distance != 11]
temp = temp.groupby('User_id').Distance.max().reset_index(name='u37')
X = pd.merge(X, temp, how='left', on='User_id')
    # Coupon type encoding
discount_types = [
'0.2', '0.5', '0.6', '0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '30:20', '50:30', '10:5',
'20:10', '100:50', '200:100', '50:20', '30:10', '150:50', '100:30', '20:5', '200:50', '5:1',
'50:10', '100:20', '150:30', '30:5', '300:50', '200:30', '150:20', '10:1', '50:5', '100:10',
'200:20', '300:30', '150:10', '300:20', '500:30', '20:1', '100:5', '200:10', '30:1', '150:5',
'300:10', '200:5', '50:1', '100:1',
]
X['discount_type'] = -1
for k, v in enumerate(discount_types):
X.loc[X.Discount_rate == v, 'discount_type'] = k
    # Number of times each discount type was received
temp = offline.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u41')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
    # Number of times each discount type was redeemed
temp = coupon_consume.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u42')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
    # Number of times each discount type was received but not used
temp = coupon_no_consume.groupby(['User_id', 'Discount_rate']).size().reset_index(name='u43')
X = pd.merge(X, temp, how='left', on=['User_id', 'Discount_rate'])
    # Redemption rate for each discount type
X['u44'] = X.u42 / X.u41
    # Number of full-reduction ("X:Y") coupons received
temp = offline[offline.Discount_rate.str.contains(':') == True]
temp = temp.groupby('User_id').size().reset_index(name='u48')
X = | pd.merge(X, temp, how='left', on='User_id') | pandas.merge |
import datetime
import string
import matplotlib.dates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from nltk import WordNetLemmatizer, LancasterStemmer, pos_tag, sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.sentiment import SentimentIntensityAnalyzer
from pandas._libs.tslibs.offsets import BDay
from sklearn import tree
from sklearn.calibration import calibration_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split, learning_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR, LinearSVC
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, plot_tree
from textblob import TextBlob
from wordcloud import WordCloud
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
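    # Plots three panels: learning curve (score vs. training size), scalability
    # (fit time vs. training size), and performance (score vs. fit time).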
if axes is None:
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].set_title(title)
if ylim is not None:
axes[0].set_ylim(*ylim)
axes[0].set_xlabel("Training examples")
axes[0].set_ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ =\
learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
fit_times_mean = np.mean(fit_times, axis=1)
fit_times_std = np.std(fit_times, axis=1)
axes[0].grid()
axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1,
color="g")
axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
axes[0].legend(loc="best")
axes[1].grid()
axes[1].plot(train_sizes, fit_times_mean, 'o-')
axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,
fit_times_mean + fit_times_std, alpha=0.1)
axes[1].set_xlabel("Training examples")
axes[1].set_ylabel("fit_times")
axes[1].set_title("Scalability of the model")
axes[2].grid()
axes[2].plot(fit_times_mean, test_scores_mean, 'o-')
axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1)
axes[2].set_xlabel("fit_times")
axes[2].set_ylabel("Score")
axes[2].set_title("Performance of the model")
return plt
def create_word_cloud(text, type):
print('\nCreating word cloud...')
word_cloud = WordCloud(width=1024, height=1024, margin=0).generate(text)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.imshow(word_cloud, interpolation='bilinear')
ax.axis("off")
ax.margins(x=0, y=0)
plt.savefig(f'wordcloud_{type}.png')
def get_stop_words(tokens):
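    # Drop URLs, retweet markers, and assorted junk tokens, remove NLTK English stopwords,
    # and join the surviving tokens back into a single string.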
stop_word_tokens = []
for word in tokens:
if word.startswith('//t.co/') or word.startswith('http') or word in ['RT', 'http', 'rt', 'timestamp',
'.', '[video]', 'AMP', 'and', 'at',
'for', 'from', 'the', 'this', 'is',
'it', 'jul', 'of', 'on', 'to', 'in',
'with', 2018, 'FALSE', '2018', 'amp',
'you', 'by', False, 0, 7, 12, 15,
'0', '7', '12', '15', 'inc']:
continue
        elif word not in stopwords.words('english') and word not in ['RT', 'http', 'rt', 'timestamp', '.', '[video]']:
stop_word_tokens.append(word)
sentence = ' '.join(stop_word_tokens)
return sentence
def get_lemma(tokens):
lemma = WordNetLemmatizer()
lemmatized_tokens = []
for token in tokens:
temp_tokens = lemma.lemmatize(token)
lemmatized_tokens.append(temp_tokens)
return get_stop_words(lemmatized_tokens)
def get_stems(tokens):
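    # Apply the Lancaster stemmer to every token except determiners, pronouns, and nouns
    # (identified by their POS tags), then pass the result on for lemmatization.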
stemmer = LancasterStemmer()
stemmed_tokens = []
for token in tokens:
for word in token:
if word[1] == 'DT' or word[1] == 'PRP' or word[1] == 'PRP$' or word[1] == 'NN' or word[1] == 'NNP' or word[1] == 'NNPS':
temp_tokens = word[0]
else:
temp_tokens = stemmer.stem(word[0])
stemmed_tokens.append(temp_tokens)
return get_lemma(stemmed_tokens)
def get_pos_tag(tokens):
pos_tokens = [pos_tag(token) for token in tokens]
return get_stems(pos_tokens)
def get_tokens(document):
sequences = sent_tokenize(document)
seq_tokens = [word_tokenize(sequence) for sequence in sequences]
no_punctuation_seq_tokens = []
for seq_token in seq_tokens:
no_punctuation_seq_tokens.append([token for token in seq_token if token not in string.punctuation])
return get_pos_tag(no_punctuation_seq_tokens)
def get_num_words(s):
return len(s.split())
def append_col(train_data):
print('\nGetting number of words in new text cells...')
word_counts = []
for index, row in train_data.iterrows():
word_counts.append(get_num_words(row['new_text']))
train_data['new_text_count'] = word_counts
return train_data
def get_bigrams(train_data):
print("\nCalculating the bigrams...")
bigram_vectorizer = CountVectorizer(ngram_range=[2, 2])
x = bigram_vectorizer.fit_transform(train_data.text)
bigram_total = bigram_vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
bigrams = pd.DataFrame(mat.todense(), index=train_data.index, columns=bigram_vectorizer.get_feature_names())
train_data = pd.concat([train_data, bigrams], ignore_index=False, sort=False, axis=1, join="inner")
return len(bigram_total), train_data
def get_trigrams(train_data):
print("\nCalculating the trigrams...")
trigram_vectorizer = CountVectorizer(ngram_range=[3, 3])
x = trigram_vectorizer.fit_transform(train_data.text)
trigram_total = trigram_vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
trigram = pd.DataFrame(mat.todense(), index=train_data.index, columns=trigram_vectorizer.get_feature_names())
train_data = pd.concat([train_data, trigram], ignore_index=False, sort=False, axis=1, join="inner")
return len(trigram_total), train_data
def get_bag_of_words(train_data, features, name, type):
print("\nCalculating the bag of words...")
vectorizer = CountVectorizer(max_features=features, stop_words='english')
x = vectorizer.fit_transform(train_data.text)
words = vectorizer.get_feature_names()
transformer = TfidfTransformer()
mat = transformer.fit_transform(x)
bow = pd.DataFrame(mat.todense(), index=train_data.index, columns=vectorizer.get_feature_names())
train_data = pd.concat([train_data, bow], ignore_index=False, sort=False, axis=1, join="inner")
df_total = train_data.drop(['text'], axis=1)
train_data.to_csv(f'df_{type}_{name}_total.csv')
return train_data
def plot_ngrams(ngrams):
print('\nPlotting ngrams...')
fig = plt.figure()
ax = plt.axes()
x = ['unigram', 'bigram', 'trigram']
ax.plot(x, ngrams)
ax.set_title('Number of ngrams in Stockerbot Dataset')
plt.savefig('ngrams.png')
def concat_date_time(train_data):
train_data['timestamp'] = train_data['date'].str.cat(train_data['time'], sep=' ')
return train_data
def get_vader_polarity(document):
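    # Returns the VADER scores as a list in dict order: [neg, neu, pos, compound];
    # split_vader_polarity() relies on this positional ordering.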
vader = SentimentIntensityAnalyzer()
score = vader.polarity_scores(document)
return list(score.values())
def split_vader_polarity(train_data):
print('\nSplitting Vader sentiment dictionary into separate columns...')
nvs = []
Nvs = []
pvs = []
cvs = []
for v in train_data.iloc[:, 19]:
nvs.append(v[0])
Nvs.append(v[1])
pvs.append(v[2])
cvs.append(v[3])
train_data['negative_vader_score'] = nvs
train_data['neutral_vader_score'] = Nvs
train_data['positive_vader_score'] = pvs
train_data['compound_vader_score'] = cvs
return train_data
def get_textblob_polarity(document):
return TextBlob(document).sentiment.polarity
def get_decision_tree_regression(name, file):
print(f'Conducting decision tree regression on {name}\'s {file} file...')
train_data = pd.read_csv(f'df_{file}_{name}_total.csv')
train_data = train_data.drop(['Unnamed: 0'], axis=1)
train_data = train_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sort_values(by=['final_scores'])
X = train_data.iloc[:, 2:3].values.astype(float)
y = train_data.iloc[:, 3:4].values.astype(float)
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
print(f'\n{name} training set after standard scaling:')
print(X.shape, y.shape)
regr_1 = DecisionTreeRegressor(max_depth=2, max_features='auto')
regr_2 = DecisionTreeRegressor(max_depth=5, max_features='auto')
regr_1.fit(X, y)
regr_2.fit(X, y)
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
plt.figure()
plt.scatter(X, y, s=20, edgecolor='black',
c='darkorange', label='data')
plt.plot(X_test, y_1, color='cornflowerblue',
label='max_depth=2', linewidth=2)
plt.plot(X_test, y_2, color='yellowgreen', label='max_depth=5', linewidth=2)
plt.xlabel('data')
plt.ylabel('target')
plt.title(f'{name} Decision Tree Regression ({file})')
plt.legend()
plt.savefig(f'{file}_{name}_dtr.png')
return train_data
def get_comparison_calibration_classifiers(name1, file):
print(f'Conducting a comparison of calibration classifiers on {name1}\'s {file} file...')
train_data = pd.read_csv(f'df_{file}_{name1}_total.csv')
train_data = train_data.drop(['Unnamed: 0'], axis=1)
train_data = train_data.drop(['date'], axis=1)
train_data = train_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.sort_values(by=['2_SMA'])
X = train_data[['final_scores', '2_SMA', '5_SMA', '7_EMA']]
y = train_data[['sentiment']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.7)
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC()
rfc = RandomForestClassifier()
fig = plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train.values.ravel())
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else:
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name,))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.5, 1.5])
ax1.legend(loc="lower right")
ax1.set_title(f'{name1} Calibration plots (reliability curve)({file})')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.savefig(f'{file}_{name1}_ccc.png')
return train_data
def get_support_vector_regression(name, file):
print(f'Conducting support vector regression on {name}\'s {file} file...')
senti_data = pd.read_csv(f'df_{file}_{name}_total.csv')
stock_data = pd.read_csv(f'df_stock_{name}.csv')
stocks = stock_data[['date', '2_SMA', '5_SMA', '7_EMA']].copy()
train_data = senti_data[['date', 'sentiment', 'final_scores']].copy()
new = train_data['date'].str.split(' ', n=1, expand=True)
train_data['date'] = new[0]
train_data = pd.merge(train_data, stocks, on=['date', 'date'], how='left', sort=False)
train_data = train_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.fillna(method='ffill')
train_data = train_data.fillna(value=0)
train_data = train_data.sort_values(by=['final_scores'])
X = train_data.iloc[:, 2:3].values.astype(float)
y = train_data.iloc[:, 3:4].values.astype(float)
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
print(f'\n{name} training set after standard scaling:')
print(X.shape, y.shape)
svr_rbf = SVR(kernel='rbf', C=10000, gamma=0.1, epsilon=.1)
svr_lin = SVR(kernel='linear', C=10000, gamma='auto')
svr_poly = SVR(kernel='poly', C=10000, gamma='auto', degree=3, epsilon=.1,
coef0=1)
lw = 2
svrs = [svr_rbf, svr_lin, svr_poly]
kernel_label = ['RBF', 'Linear', 'Polynomial']
model_color = ['m', 'c', 'g']
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 10), sharey=True)
for ix, svr in enumerate(svrs):
axes[ix].plot(X, svr.fit(X, y).predict(X), color=model_color[ix], lw=lw,
label='{} model'.format(kernel_label[ix]))
axes[ix].scatter(X[svr.support_], y[svr.support_], facecolor="none",
edgecolor=model_color[ix], s=50,
label='{} support vectors'.format(kernel_label[ix]))
axes[ix].scatter(X[np.setdiff1d(np.arange(len(X)), svr.support_)],
y[np.setdiff1d(np.arange(len(X)), svr.support_)],
facecolor="none", edgecolor="k", s=50,
label='other training data')
axes[ix].legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),
ncol=1, fancybox=True, shadow=True)
fig.text(0.5, 0.04, 'data', ha='center', va='center')
fig.text(0.06, 0.5, 'target', ha='center', va='center', rotation='vertical')
fig.suptitle(f'{name} Support Vector Regression ({file})', fontsize=14)
plt.savefig(f'{file}_{name}_swr.png')
train_data.to_csv(f'df_{file}_{name}_total.csv')
return train_data
def get_decision_tree_classifier(train_data, name, file):
print(f'Creating decision tree classifiers on {name}\'s {file} file...')
train_data = train_data.drop(['date'], axis=1)
train_data = train_data.drop(['trading_time'], axis=1)
train_data = train_data.drop(['source'], axis=1)
train_data = train_data.drop(['text'], axis=1)
sentiment = train_data.pop('sentiment')
train_data.insert(0, 'sentiment', sentiment)
y = train_data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(train_data, y, test_size=0.33)
dtc = DecisionTreeClassifier(criterion='entropy', max_features='auto', max_depth=5, random_state=0)
print("Decision Tree classifier")
pred = dtc.fit(X_train, y_train)
predictions = pred.predict(X_test)
text_representation = tree.export_text(dtc)
with open(f'decision_tree_{file}_{name}.log', 'w') as fout:
fout.write(text_representation)
feature_names = list(train_data.columns.values)
fig = plt.figure(figsize=(15, 10))
plot_tree(dtc,
feature_names=feature_names,
class_names=["FALSE", "TRUE"],
filled=True,
fontsize=12)
plt.title(f'{file} Decision Tree for {name}')
plt.savefig(f'decision_tree_{file}_{name}.png')
fig = plt.figure(figsize=(15, 10))
con_mat = confusion_matrix(y_true=y_test, y_pred=predictions)
group_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']
group_counts = ['{0: 0.0f}'.format(value) for value in con_mat.flatten()]
group_percentages = ['{0: .2f}'.format(value) for value in con_mat.flatten() / np.sum(con_mat)]
labels = [f'{v1}\n{v2}\n{v3}' for v1, v2, v3 in zip(group_names, group_counts, group_percentages)]
labels = np.asarray(labels).reshape(2, 2)
sns.heatmap(con_mat, annot=labels, fmt='', cmap='Blues')
plt.title(f'{file} Confusion Matrix for {name}')
plt.savefig(f'confusion_matrix_{file}_{name}.png')
fig = plt.figure(figsize=(15, 10))
class_rpt = pd.DataFrame(classification_report(predictions, y_test, digits=2, output_dict=True))
class_rpt.style.background_gradient(cmap='newcmp', subset=pd.IndexSlice['0':'9', :'f1-score']).set_properties(
**{'text-align': 'center', 'font-size': '30px'})
sns.heatmap(class_rpt.iloc[:-1, :].T, annot=True)
plt.title(f'{file} Classification Report for {name}')
plt.savefig(f'classification_report_{file}_{name}.png')
def combine_stock_sentiments(name, code):
print('\nCombining extreme and blob data frames back with train_data for regressions...')
train_data = pd.read_csv(f'stockerbot_cleaned.csv')
if code == 0:
df_extreme = pd.read_csv(f'df_extreme_vader_{name}.csv')
df_extreme['date'] = pd.to_datetime(df_extreme['date'])
type = 'vader'
elif code == 1:
df_extreme = | pd.read_csv(f'df_extreme_blob_{name}.csv') | pandas.read_csv |
from torchwisdom.core.predictor import _Predictor
from .utils import *
from typing import *
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchwisdom.core.utils import DatasetCollector
import pandas as pd
__all__ = ['TabularClassifierPredictor']
class TabularSupervisedPredictor(_Predictor):
def __init__(self, model: nn.Module, data: DatasetCollector, transform=None):
super(TabularSupervisedPredictor, self).__init__(model, data, transform)
def _pre_check(self, *args: Any, **kwargs: Any) -> bool:
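        # Valid input is either positional args or keyword args, but not both and not neither.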
la, lk = len(args), len(kwargs)
if la == 0 and lk == 0:
return False
elif la > 0 and lk > 0:
return False
else:
return True
def _pre_predict(self, *args: Any, **kwargs: Any) -> torch.Tensor:
is_clean = self._pre_check(*args, **kwargs)
if is_clean:
if len(args) > 0 and len(kwargs) == 0:
data = torch.Tensor([args])
else:
if 'csv_file' in kwargs:
csv_file = kwargs.get('csv_file')
frame = pd.read_csv(csv_file)
data = torch.from_numpy(frame.values).float()
elif 'dataframe' in kwargs:
frame = kwargs.get('dataframe')
data = torch.from_numpy(frame.values).float()
elif 'tensor_data' in kwargs:
data = kwargs.get('tensor_data')
if data.dim() <2:
data = data.unsqueeze(dim=0)
elif 'numpy_data' in kwargs:
numpy_data = kwargs.get('numpy_data')
data = torch.from_numpy(numpy_data).float()
elif 'list_data' in kwargs:
list_data = kwargs.get("list_data")
data = torch.Tensor([list_data]).float()
else:
data = None
else:
data = None
return data
@staticmethod
def _clean_remove_kwargs(key, **kwargs):
if key in kwargs: kwargs.pop(key)
return kwargs
def _clean_kwargs(self, **kwargs: Any) -> Any:
kwargs = self._clean_remove_kwargs('use_topk', **kwargs)
kwargs = self._clean_remove_kwargs('kval', **kwargs)
kwargs = self._clean_remove_kwargs('target', **kwargs)
kwargs = self._clean_remove_kwargs('show_table', **kwargs)
kwargs = self._clean_remove_kwargs('feature_columns', **kwargs)
kwargs = self._clean_remove_kwargs('target_columns', **kwargs)
return kwargs
class TabularUnsupervisedPredictor(_Predictor):
def __init__(self):
super(TabularUnsupervisedPredictor, self).__init__()
class TabularClassifierPredictor(TabularSupervisedPredictor):
def __init__(self, model: nn.Module, data: DatasetCollector, transform=None):
super(TabularClassifierPredictor, self).__init__(model, data, transform)
if transform is None:
self.transform = self.data.validset_attr.transform
def _predict(self, feature: torch.Tensor):
prediction = None
if len(feature):
feature.to(self.device)
self.model = self.model.to(self.device)
self.model.eval()
with torch.no_grad():
prediction = self.model(feature)
return prediction
def _post_predict(self, prediction: torch.Tensor, use_topk: bool = False, kval: int = 5) -> Union[None, Tuple]:
is_clean = self._post_check(prediction)
if is_clean:
if not use_topk:
return self._predict_label(prediction)
else:
return self._predict_topk(prediction, kval=kval)
return None
def _post_check(self, prediction: torch.Tensor) -> bool:
# check output is clean (classfier label image, not image)
return True
def _class_label(self, class_index: torch.Tensor, is_topk=False) -> Union[str, List, None]:
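        # Map predicted class indices (or top-k index tensors) to label strings using the
        # training set's `classes` attribute; returns None when no class names are available.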
class_label = []
classes = self.data.trainset_attr.classes
if classes:
if not is_topk:
if len(class_index) >= 2:
for cidx in class_index:
class_label.append(classes[cidx])
return class_label
else:
return classes[class_index]
else:
for ctn in class_index:
topk_label = []
for cidx in ctn:
topk_label.append(classes[cidx])
class_label.append(topk_label)
return class_label
return None
def _predict_topk(self, prediction: torch.Tensor, kval: int = 5) -> Union[bool, Tuple]:
if is_tensor_label(prediction):
output = F.log_softmax(prediction, dim=1)
ps = torch.exp(output)
probability, class_index = ps.topk(kval, dim=1, largest=True, sorted=True)
class_label = self._class_label(class_index, is_topk=True)
return probability, class_index, class_label
return False
def _predict_label(self, prediction: torch.Tensor) -> Union[bool, Tuple]:
if is_tensor_label(prediction):
class_index = torch.argmax(prediction, dim=1)
class_label = self._class_label(class_index)
return class_index, class_label
return False
@staticmethod
def _build_topk_series( predict, data_dict, target_columns, kval):
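        # For each of the k best predictions, add a "<label> (<probability>%)" column to the
        # dict that is later turned into the result DataFrame.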
for i in range(kval):
percent = predict[0]
classes = predict[2]
data = []
for cls, prc in zip(classes, percent):
if len(cls)==1:
data.append(f"{cls[0]} ({prc[0]*100:.4f}%)")
else:
data.append(f"{cls[i]} ({prc[i]*100:.4f}%)")
data_dict.update({target_columns[0] + "_predict_top" + str(i + 1): data})
def _show_as_dataframe(self, feature: torch.Tensor, predict: torch.Tensor,
target: torch.Tensor, kval, **kwargs: Any) -> pd.DataFrame:
feature_columns = kwargs.get("feature_columns", [])
target_columns = kwargs.get("target_columns", [])
if target.dim() < 2:
tdn = 2 - target.dim()
for i in range(tdn):
target = target.unsqueeze(dim=0)
target = self._class_label(target[0])
data_dict = {}
for idx, col in enumerate(feature_columns):
data_dict.update({col: feature[:, idx]})
if target is not None:
data_dict.update({target_columns[0] + "_truth": target[:]})
if len(predict) == 3:
self._build_topk_series(predict, data_dict, target_columns, kval)
elif len(predict) == 2:
data_dict.update({target_columns[0] + "_predict": predict[1]})
df = | pd.DataFrame(data_dict) | pandas.DataFrame |
import pandas as pd
import glob
data_path = 'E:/GenderClassification/PycharmProjects/GenderClassification/home/abeer/Dropbox/Dataset_HAR project/Brushing Teeth/*'
addrs = glob.glob(data_path)
for i in addrs:
folders = glob.glob(i + '/*')
for j in folders:
csv_files = glob.glob(j + '/*')
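        # Per-sensor frames (shoulder, elbow, watch) are seeded from the template in
        # 'initAcc.csv' and then collect the matching accelerometer CSVs below.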
shoulder = pd.read_csv('initAcc.csv')
elbow = pd.read_csv('initAcc.csv')
watch = pd.read_csv('initAcc.csv')
for k in csv_files:
if '(1)' in k or '(2)' in k or '(3)' in k or '(4)' in k or '(5)' in k:
continue
elif 'Accelerometer' in k and 'Shoulder' in k:
file = pd.read_csv(k)
shoulder = shoulder.append(file.iloc[:, 3:])
shoulder = shoulder.reset_index(drop=True)
print(shoulder.columns)
elif 'Accelerometer' in k and "Elbow" in k:
file = pd.read_csv(k)
elbow = elbow.append(file.iloc[:, 3:])
elbow = elbow.reset_index(drop=True)
elif 'D5' not in k and "F5" not in k:
file = | pd.read_csv(k) | pandas.read_csv |
import pandas as pd
import threading
from helpers.keywords import keywords_list
from helpers.movie_helper import get_movie_keywords
from database import save_movie
# movies = pd.read_csv("../data/movie_details_after_2010.csv", lineterminator='\n')
# movie_groups = movies.groupby(movies.release_date)
def get_keywords():
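    # Relies on the module-level `movies` / `movie_groups` objects (commented out above);
    # spawns one thread per distinct release year to fetch and attach keywords.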
years = movies[['release_date']].drop_duplicates()
for idx, row in years.iterrows():
year = row['release_date'].item()
# add_keywords(year)
threading.Thread(target=add_keywords, args=(year,)).start()
def add_keywords(year):
movies_by_year = movie_groups.get_group(year)
movies_updated = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import numba
from functools import partial
import multiprocessing
import random
from scipy import stats
class naive_sghmc():
    def __init__(self, lnp, lnp_grad, initialguess, data=None, usedata=False, M=None):
        '''
        Naive stochastic-gradient HMC sampler.
        lnp, lnp_grad : log target density and its gradient
        initialguess  : starting position; its length sets the dimension
        data, usedata : optional dataset and flag for minibatch gradient estimates
        M             : mass matrix (identity when None)
        '''
self.data = data
self.ndim = len(initialguess)
self.get_mass_matrix(M)
self.theta0 = initialguess
self.lnp = lnp
self.lnp_grad = lnp_grad
self.res = []
self.r = []
self.usedata = usedata
if usedata:
self.n = len(data)
def get_mass_matrix(self, mass_matrix=None):
"""
get the inverse of the mass matrix
"""
if mass_matrix is None:
self.mass_matrix = np.identity(self.ndim)
self.inverse_mass_matrix = np.identity(self.ndim)
else:
if len(mass_matrix) != self.ndim:
print("Invalid mass matrix")
elif len(mass_matrix) == 1:
self.mass_matrix = mass_matrix
self.inverse_mass_matrix = 1. / mass_matrix
#self.ndim_mass = 1
else:
self.mass_matrix = mass_matrix
self.inverse_mass_matrix = np.linalg.inv(mass_matrix)
#self.ndim_mass = 2
def define_momentum(self):
"""
sample momentum
"""
if self.ndim == 1:
r = np.random.normal(0, np.sqrt(self.mass_matrix))
else:
r = np.random.multivariate_normal(np.zeros(self.ndim), self.mass_matrix)
return r
def velocity(self, r):
"""
Get the velocities (gradient of kinetic) given a momentum vector
"""
if self.ndim == 1:
v = self.inverse_mass_matrix * r
else:
v = np.dot(self.inverse_mass_matrix, r)
return v
def kinetic_energy(self, r):
"""
Get the kinetic energy given momentum
"""
if self.ndim == 1:
K = self.inverse_mass_matrix * r**2
else:
K = np.dot(r.T, np.dot(self.inverse_mass_matrix, r))
return 0.5 * K
def grad_U(self, thetax, size):
"""
get the estimate gradient based on minibatches
pramas theta:
position
pramas size:
number of datapoints
"""
if self.usedata:
df = | pd.DataFrame(self.data) | pandas.DataFrame |
import os
import unittest
from unittest import mock
import numpy as np
import pandas as pd
from dataprofiler import Data, Profiler, ProfilerOptions
from dataprofiler.labelers.base_data_labeler import BaseDataLabeler
from dataprofiler.profilers.profiler_options import FloatOptions, IntOptions
@mock.patch('dataprofiler.profilers.data_labeler_column_profile.'
'DataLabelerColumn.update', return_value=None)
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler', spec=BaseDataLabeler)
class TestProfilerOptions(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = Data(data=pd.DataFrame([1, 2]), data_type='csv')
def test_default_profiler_options(self, *mocks):
# Allowing Profiler to create default options
profile = Profiler(self.data)
self.assertIsNotNone(profile.options)
self.assertTrue(profile.options.data_labeler.is_enabled)
for column in profile.options.properties:
# TODO: remove the check for correlation option once it's updated to True
if column == 'correlation':
self.assertFalse(profile.options.properties[column].is_enabled)
elif column == 'null_values':
self.assertIsNone(profile.options.properties[column])
else:
self.assertTrue(profile.options.properties[column].is_enabled)
for column_type in ["int", "float", "text"]:
column = profile.options.properties[column_type]
self.assertTrue(column.properties["histogram_and_quantiles"])
self.assertTrue(column.properties["min"])
self.assertTrue(column.properties["max"])
self.assertTrue(column.properties["sum"])
self.assertTrue(column.properties["variance"])
self.assertTrue(column.properties["is_numeric_stats_enabled"])
if column_type != "text":
self.assertTrue(column.properties["num_zeros"].is_enabled)
self.assertTrue(column.properties["num_negatives"].is_enabled)
else:
self.assertFalse(column.properties["num_zeros"].is_enabled)
self.assertFalse(column.properties["num_negatives"].is_enabled)
# Using ProfilerOptions with the default options
options = ProfilerOptions()
profile2 = Profiler(self.data, options=options)
# Stored in Profiler as StructuredOptions
self.assertEqual(profile2.options, options.structured_options)
def test_set_failures(self, *mocks):
options = ProfilerOptions()
# check if no '*.' it raises an error bc attribute not found
expected_error = ("type object 'ProfilerOptions' has no attribute "
"'is_enabled'")
with self.assertRaisesRegex(AttributeError, expected_error):
options.set({"is_enabled": False})
# check if attribute doesn't exist, it raises an error
expected_error = ("type object 'structured_options' has no attribute "
"'test'")
with self.assertRaisesRegex(AttributeError, expected_error):
options.set({"structured_options.test": False})
def test_numerical_stats_option(self, *mocks):
# Assert that the stats are disabled
options = ProfilerOptions()
options.set({"*.is_numeric_stats_enabled": False,
"bias_correction.is_enabled": False})
profile = Profiler(self.data, options=options)
for col_profiler in profile.profile:
profile_column = col_profiler.profile
if profile_column["statistics"] \
and "histogram" in profile_column["statistics"].keys() \
and profile_column["statistics"]["histogram"]:
self.assertIsNone(
profile_column["statistics"]["histogram"]["bin_counts"])
self.assertIsNone(
profile_column["statistics"]["histogram"]["bin_edges"])
self.assertIsNone(profile_column["statistics"]["min"])
self.assertIsNone(profile_column["statistics"]["max"])
self.assertTrue(np.isnan(profile_column["statistics"]["variance"]))
self.assertIsNone(profile_column["statistics"]["quantiles"][0])
self.assertTrue(np.isnan(profile_column["statistics"]["skewness"]))
self.assertTrue(np.isnan(profile_column["statistics"]["kurtosis"]))
# Assert that the stats are enabled
options.set({"*.is_numeric_stats_enabled": True,
"bias_correction.is_enabled": True})
profile = Profiler(self.data, options=options)
for col_profiler in profile.profile:
profile_column = col_profiler.profile
if profile_column["statistics"] \
and "histogram" in profile_column["statistics"].keys() \
and profile_column["statistics"]["histogram"]:
self.assertIsNotNone(
profile_column["statistics"]["histogram"]["bin_counts"])
self.assertIsNotNone(
profile_column["statistics"]["histogram"]["bin_edges"])
self.assertIsNotNone(profile_column["statistics"]["min"])
self.assertIsNotNone(profile_column["statistics"]["max"])
self.assertEqual(0.5, profile_column["statistics"]["variance"])
self.assertIsNotNone(
profile_column["statistics"]["quantiles"][0])
self.assertTrue(profile_column["statistics"]["skewness"] is np.nan)
self.assertTrue(profile_column["statistics"]["kurtosis"] is np.nan)
def test_disable_labeler_in_profiler_options(self, *mocks):
options = ProfilerOptions()
options.structured_options.data_labeler.enable = False
profile = Profiler(self.data, options=options)
for col_profiler in profile.profile:
profile_column = col_profiler.profile
if profile_column["statistics"] \
and "data_label_probability" in \
profile_column["statistics"].keys():
self.assertIsNone(profile_column["statistics"]
["data_label_probability"])
def test_disabling_all_columns(self, *mocks):
options = ProfilerOptions()
options.structured_options.text.is_enabled = False
options.structured_options.float.is_enabled = False
options.structured_options.int.is_enabled = False
options.structured_options.datetime.is_enabled = False
options.structured_options.order.is_enabled = False
options.structured_options.category.is_enabled = False
options.structured_options.chi2_homogeneity.is_enabled = False
options.structured_options.data_labeler.is_enabled = False
profile = Profiler(self.data, options=options)
for col_profiler in profile.profile:
profile_column = col_profiler.profile
self.assertIsNone(profile_column["data_type"])
self.assertTrue("data_label" not in profile_column.keys())
self.assertIsNone(profile_column["categorical"])
self.assertIsNone(profile_column["order"])
self.assertDictEqual({
'sample_size': 2,
'null_count': 0,
'null_types': [],
'null_types_index': {}
}, profile_column["statistics"])
@mock.patch('dataprofiler.profilers.text_column_profile.TextColumn'
'._update_vocab')
def test_disabling_vocab(self, vocab_mock, *mocks):
# Check to see disabling vocab prevents vocab from updating
options = ProfilerOptions()
options.structured_options.text.vocab.is_enabled = False
profile = Profiler(self.data, options=options)
vocab_mock.assert_not_called()
# Check to see default options enable vocab
multi_options = ProfilerOptions()
multi_options.structured_options.multiprocess.is_enabled = False
profile = Profiler(self.data, options=multi_options)
vocab_mock.assert_called()
def test_disabling_all_stats(self, *mocks):
options = ProfilerOptions()
statistical_options = {
"histogram_and_quantiles.is_enabled": False,
"min.is_enabled": False,
"max.is_enabled": False,
"mode.is_enabled": False,
"median.is_enabled": False,
"sum.is_enabled": False,
"variance.is_enabled": False,
"skewness.is_enabled": False,
"kurtosis.is_enabled": False,
"num_zeros.is_enabled": False,
"num_negatives.is_enabled": False,
"median_abs_deviation.is_enabled": False
}
options.set(statistical_options)
# Assert the numerics are properly set
text_options = options.structured_options.text.properties
float_options = options.structured_options.float.properties
int_options = options.structured_options.int.properties
for option in ["histogram_and_quantiles", "min", "max", "sum",
"mode", "variance", "skewness", "kurtosis",
"median_abs_deviation",
"num_zeros", "num_negatives"]:
self.assertFalse(text_options[option].is_enabled)
self.assertFalse(float_options[option].is_enabled)
self.assertFalse(int_options[option].is_enabled)
# Run the profiler
profile = Profiler(self.data, options=options)
# Assert that the stats are non-existent
for col_profiler in profile.profile:
profile_column = col_profiler.profile
if profile_column["statistics"] \
and "histogram" in profile_column["statistics"].keys() \
and profile_column["statistics"]["histogram"]:
self.assertIsNone(
profile_column["statistics"]["histogram"]["bin_counts"])
self.assertIsNone(
profile_column["statistics"]["histogram"]["bin_edges"])
self.assertIsNone(profile_column["statistics"]["min"])
self.assertIsNone(profile_column["statistics"]["max"])
self.assertTrue(np.isnan(profile_column["statistics"]["variance"]))
self.assertIsNone(profile_column["statistics"]["quantiles"][0])
self.assertTrue(profile_column["statistics"]["skewness"] is np.nan)
self.assertTrue(profile_column["statistics"]["kurtosis"] is np.nan)
self.assertTrue(
profile_column["statistics"]["median_abs_deviation"] is np.nan)
self.assertTrue(np.isnan(profile_column["statistics"]["mode"]))
self.assertTrue(np.isnan(profile_column["statistics"]["median"]))
def test_validate(self, *mocks):
options = ProfilerOptions()
options.structured_options.data_labeler.is_enabled = "Invalid"
options.structured_options.data_labeler.data_labeler_dirpath = 5
options.structured_options.int.max = "Invalid"
expected_error = (
"ProfilerOptions.structured_options.int.max must be a "
"BooleanOption.\n"
"ProfilerOptions.structured_options.data_labeler.is_enabled must be"
" a Boolean.\n"
"ProfilerOptions.structured_options.data_labeler."
"data_labeler_dirpath must be a string.")
with self.assertRaisesRegex(ValueError, expected_error):
options.validate()
def test_validate_numeric_stats(self, *mocks):
options = ProfilerOptions()
numerical_options = {
"histogram_and_quantiles.is_enabled": False,
"min.is_enabled": False,
"max.is_enabled": False,
"mode.is_enabled": False,
"median.is_enabled": False,
"sum.is_enabled": False,
"variance.is_enabled": True,
"skewness.is_enabled": False,
"kurtosis.is_enabled": False,
"median_abs_deviation.is_enabled": False
}
# Asserts error since sum must be toggled on if variance is
expected_error = (
"ProfilerOptions.structured_options.int: The numeric stats must "
"toggle on the sum if the variance is toggled on.\n"
"ProfilerOptions.structured_options.float: The numeric stats must "
"toggle on the sum if the variance is toggled on.\n"
"ProfilerOptions.structured_options.text: The numeric stats must "
"toggle on the sum if the variance is toggled on."
)
options.set(numerical_options)
with self.assertRaisesRegex(ValueError, expected_error):
options.validate()
# test warns if is_numeric_stats_enabled = False
numerical_options = {
"*.is_numeric_stats_enabled": False,
}
options.set(numerical_options)
with self.assertWarnsRegex(UserWarning,
'ProfilerOptions.structured_options.int.'
'numeric_stats: The numeric stats are '
'completely disabled.'):
options.validate()
def test_setting_options(self, *mocks):
options = ProfilerOptions()
# Ensure set works appropriately
options.set({
"data_labeler.is_enabled": False,
"min.is_enabled": False,
"structured_options.data_labeler.data_labeler_dirpath": "test",
"data_labeler.max_sample_size": 15})
text_options = options.structured_options.text.properties
float_options = options.structured_options.float.properties
int_options = options.structured_options.int.properties
data_labeler_options = options.structured_options.data_labeler \
.properties
self.assertFalse(options.structured_options.data_labeler.is_enabled)
self.assertFalse(text_options["min"].is_enabled)
self.assertFalse(float_options["min"].is_enabled)
self.assertFalse(int_options["min"].is_enabled)
self.assertEqual(data_labeler_options["data_labeler_dirpath"], "test")
self.assertEqual(data_labeler_options["max_sample_size"], 15)
# Ensure direct attribute setting works appropriately
options.structured_options.data_labeler.max_sample_size = 12
options.structured_options.text.histogram_and_quantiles\
.is_enabled = True
options.structured_options.text.is_enabled = False
text_options = options.structured_options.text.properties
data_labeler_options = options.structured_options.data_labeler \
.properties
self.assertEqual(data_labeler_options["max_sample_size"], 12)
self.assertTrue(text_options["histogram_and_quantiles"].is_enabled)
self.assertFalse(text_options["is_enabled"])
# check direct attribute access after set
float_options = FloatOptions()
float_options.set({"precision.is_enabled": False,
"min.is_enabled": False,
"*.is_enabled": False})
self.assertFalse(float_options.precision.is_enabled)
self.assertFalse(float_options.min.is_enabled)
self.assertFalse(float_options.is_enabled)
def test_improper_profile_options(self, *mocks):
with self.assertRaisesRegex(
ValueError, "The profile options must be passed as a "
"ProfileOptions object."):
profile = Profiler(self.data, options="Strings are not accepted")
with self.assertRaisesRegex(
ValueError, "ProfilerOptions.structured_options.text.max."
"is_enabled must be a Boolean."):
profile_options = ProfilerOptions()
profile_options.structured_options.text.max.is_enabled = "String"
profile_options.validate()
def test_invalid_options_type(self, *mocks):
# Test incorrect data labeler options
options = ProfilerOptions()
options.structured_options.data_labeler = IntOptions()
with self.assertRaisesRegex(
ValueError, r"data_labeler must be a\(n\) DataLabelerOptions."):
profile = Profiler(self.data, options=options)
# Test incorrect float options
options = ProfilerOptions()
options.structured_options.float = IntOptions()
with self.assertRaisesRegex(
ValueError, r"float must be a\(n\) FloatOptions."):
profile = Profiler(self.data, options=options)
@mock.patch('dataprofiler.profilers.float_column_profile.FloatColumn.'
'_update_precision')
def test_float_precision(self, update_precision, *mocks):
options = ProfilerOptions()
options.structured_options.float.precision.is_enabled = False
options.structured_options.multiprocess.is_enabled = False
profile = Profiler(self.data, options=options)
update_precision.assert_not_called()
multi_options = ProfilerOptions()
multi_options.structured_options.multiprocess.is_enabled = False
profile = Profiler(self.data, options=multi_options)
update_precision.assert_called()
def test_set_attribute_error(self, *mocks):
options = ProfilerOptions()
with self.assertRaisesRegex(AttributeError,
"type object \'structured_options."
"data_labeler.is_enabled\' has no attribute"
" \'is_here\'"):
options.set({"data_labeler.is_enabled.is_here": False})
def test_is_prop_enabled(self, *mocks):
options = ProfilerOptions()
with self.assertRaisesRegex(AttributeError,
"Property \"Invalid\" does not exist in "
"TextOptions."):
options.structured_options.text.is_prop_enabled("Invalid")
# This test is to ensure is_prop_enabled works for BooleanOption objects
options.structured_options.int.min.is_enabled = True
self.assertTrue(options.structured_options.int.is_prop_enabled("min"))
# This test is to ensure is_prop_enabled works for bools
options.structured_options.int.max.is_enabled = True
options.structured_options.int.variance.is_enabled = True
options.structured_options.int.histogram_and_quantiles.is_enabled = True
options.structured_options.int.sum.is_enabled = True
self.assertTrue(options.structured_options.int.
is_prop_enabled("is_numeric_stats_enabled"))
def test_setting_overlapping_option(self, *mocks):
options = ProfilerOptions()
# Raises error if overlap option set
sets = [
{"data_labeler.data_labeler_object": 3},
{"data_labeler.data_labeler_dirpath": 3},
{"text.is_enabled": False},
{"text.vocab.is_enabled": False}
]
for set_dict in sets:
msg = f"Attempted to set options {set_dict} in ProfilerOptions " \
f"without specifying whether to set them for " \
f"StructuredOptions or UnstructuredOptions."
with self.assertRaisesRegex(ValueError, msg):
options.set(set_dict)
# Works still for disabling both labelers
options.set({"data_labeler.is_enabled": False})
self.assertFalse(options.structured_options.data_labeler.is_enabled)
self.assertFalse(options.unstructured_options.data_labeler.is_enabled)
# Works for specifying which to set for
options = ProfilerOptions()
options.set(
{"unstructured_options.data_labeler.data_labeler_object": 23})
self.assertIsNone(options.structured_options.data_labeler.
data_labeler_object)
self.assertEqual(23, options.unstructured_options.data_labeler.
data_labeler_object)
options = ProfilerOptions()
options.set({"structured_options.data_labeler.data_labeler_dirpath":
"Hello There"})
self.assertEqual("Hello There", options.structured_options.
data_labeler.data_labeler_dirpath)
self.assertIsNone(options.unstructured_options.data_labeler.
data_labeler_dirpath)
options = ProfilerOptions()
options.set({"unstructured_options.text.is_enabled": False})
self.assertTrue(options.structured_options.text.is_enabled)
self.assertFalse(options.unstructured_options.text.is_enabled)
options = ProfilerOptions()
options.set({"structured_options.text.vocab.is_enabled": False})
self.assertFalse(options.structured_options.text.vocab.is_enabled)
self.assertTrue(options.unstructured_options.text.vocab.is_enabled)
def test_eq(self, *mocks):
options = ProfilerOptions()
options2 = ProfilerOptions()
options.unstructured_options.data_labeler.is_enabled = False
self.assertNotEqual(options, options2)
options2.unstructured_options.data_labeler.is_enabled = False
self.assertEqual(options, options2)
options.structured_options.float.precision.sample_ratio = 0.1
self.assertNotEqual(options, options2)
options2.structured_options.float.precision.sample_ratio = 0.15
self.assertNotEqual(options, options2)
options.structured_options.float.precision.sample_ratio = 0.1
self.assertNotEqual(options, options2)
@mock.patch('dataprofiler.profilers.data_labeler_column_profile.'
'DataLabelerColumn.update', return_value=None)
@mock.patch('dataprofiler.profilers.profile_builder.DataLabeler')
class TestDataLabelerCallWithOptions(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = Data(data= | pd.DataFrame([1, 2]) | pandas.DataFrame |
import os
import unittest
from typeguard import typechecked
from typing import List, Dict
import pandas as pd
from cgnal.core.logging.defaults import getDefaultLogger
from cgnal.core.tests.core import logTest, TestCase
from cgnal.core.utils.decorators import lazyproperty as lazy, param_check
from cgnal.core.utils.dict import (
groupIterable,
pairwise,
union,
flattenKeys,
unflattenKeys,
filterNones,
groupBy,
)
from cgnal.core.utils.fs import (
mkdir,
create_dir_if_not_exists,
get_lexicographic_dirname,
)
from cgnal.core.utils.pandas import is_sparse, loc
from tests import TMP_FOLDER
logger = getDefaultLogger()
class TestUtilsDict(TestCase):
@logTest
def test_groupIterable(self):
self.assertEqual(
[
el
for el in groupIterable(
{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6}, batch_size=3
)
],
[["a", "b", "c"], ["d", "e", "f"]],
)
self.assertEqual(
[
el
for el in groupIterable(
{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, batch_size=3
)
],
[["a", "b", "c"], ["d", "e"]],
)
self.assertEqual(
[
el
for el in groupIterable(
{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7},
batch_size=3,
)
],
[["a", "b", "c"], ["d", "e", "f"], ["g"]],
)
@logTest
def test_pairwise(self):
self.assertEqual(
[el for el in pairwise({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6})],
[("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f")],
)
self.assertEqual([el for el in pairwise({"a": 1})], [])
@logTest
def test_union(self):
self.assertEqual(
union({"1": {"a": 1}}, filterNones({"1": {"a": None}, "b": 1})),
{"1": {"a": 1}, "b": 1},
)
self.assertEqual(
union({"1": {"a": 1}}, filterNones({"1": {"a": 2}, "b": None})),
{"1": {"a": 2}},
)
self.assertEqual(
union({"1": None}, {"1": 1, "2": 3}, {"1": {"1a": 1, "1b": 2}, "3": 4}),
{"1": {"1a": 1, "1b": 2}, "2": 3, "3": 4},
)
@logTest
def test_flattenKeys(self):
self.assertEqual(
flattenKeys({"a": {"b": {"c": 2}}, "d": 2, "e": 3}, sep="."),
{"a.b.c": 2, "d": 2, "e": 3},
)
self.assertEqual(
flattenKeys({"a": {"b": {"c": 2}}, "a": 2, "e": 3}), {"a": 2, "e": 3}
)
@logTest
def test_unflattenKeys(self):
self.assertEqual(
unflattenKeys({"a.b.c": 2, "d": 2, "e": 3}, sep="."),
{"a": {"b": {"c": 2}}, "d": 2, "e": 3},
)
self.assertEqual(
unflattenKeys({"a.b.c": 2, "d": 2, "e": 3}, sep="_"),
{"a.b.c": 2, "d": 2, "e": 3},
)
@logTest
def test_filterNones(self):
self.assertEqual(filterNones({"a": 1, "b": None}), {"a": 1})
@logTest
def test_groupBy(self):
self.assertEqual(
[(k, v) for k, v in groupBy(["abc", "ab", "bcd", "c"], key=len)],
[(1, ["c"]), (2, ["ab"]), (3, ["abc", "bcd"])],
)
class TestUtilsFs(TestCase):
@logTest
def test_mkdir(self):
directory = os.path.join("/tmp", "test_utils_fs")
mkdir(directory)
self.assertTrue(os.path.exists(directory))
os.rmdir(directory)
@logTest
def test_create_dir_if_not_exists(self):
directory = os.path.join("/tmp", "test_utils_fs")
create_dir_if_not_exists(directory)
self.assertTrue(os.path.exists(directory))
os.rmdir(directory)
@logTest
def test_get_lexicographic_dirname(self):
create_dir_if_not_exists(os.path.join("/tmp", "zzz"))
self.assertEqual(get_lexicographic_dirname("/tmp", first=False), "zzz")
os.rmdir(os.path.join("/tmp", "zzz"))
class TestPandas(TestCase):
@logTest
def test_is_sparse(self):
self.assertTrue(
is_sparse(
pd.DataFrame(
{
"v1": | pd.arrays.SparseArray([0, 0, 0, 0, 1]) | pandas.arrays.SparseArray |
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
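# Illustrative contrast with value-based equality (a sketch; relies on the
# _id sharing implemented above):
# >>> left = pd.Index([1, 2, 3])
# >>> left.is_(left.view())          # a view shares the same _id
# True
# >>> left.is_(pd.Index([1, 2, 3]))  # equal values, but independently created
# False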
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
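# Short usage sketch (hedged: the returned class depends on dtype inference):
# >>> idx = pd.Index([1, 2, 3])
# >>> idx.where(idx > 1)             # entries failing the condition become NaN
# Float64Index([nan, 2.0, 3.0], dtype='float64')
# >>> idx.where(idx > 1, other=0)    # or are taken from ``other``
# Int64Index([0, 2, 3], dtype='int64')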
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
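# Sketch of the fallback chain above (illustrative inputs, not from the source):
# >>> Index._try_convert_to_int_index(np.array([1.0, 2.0]), False, None, None)
# Int64Index([1, 2], dtype='int64')
# >>> Index._try_convert_to_int_index(np.array([1.5, 2.0]), False, None, None)
# Traceback (most recent call last):
#     ...
# ValueError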
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
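# Usage sketch for the ``name``/``deep``/``dtype`` interplay documented above
# (illustrative):
# >>> idx = pd.Index([1, 2, 3], name='n')
# >>> idx.copy(name='m')
# Int64Index([1, 2, 3], dtype='int64', name='m')
# >>> idx.copy(dtype='float64').dtype
# dtype('float64')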
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
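# Small example of the index -> Series round trip this enables (illustrative):
# >>> idx = pd.Index(['a', 'b'], name='key')
# >>> idx.to_series()
# key
# a    a
# b    b
# Name: key, dtype: object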
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
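# Casting sketch (assumes the numeric and categorical subclasses referenced above):
# >>> pd.Index([1, 2, 3]).astype('float64')
# Float64Index([1.0, 2.0, 3.0], dtype='float64')
# >>> pd.Index([1, 2, 3]).astype('category')
# CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')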
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
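# Equivalent to ``set_names`` with a single label (illustrative):
# >>> pd.Index([1, 2, 3], name='old').rename('new')
# Int64Index([1, 2, 3], dtype='int64', name='new')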
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
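# Example output (illustrative):
# >>> pd.Index([1, 2, 3])._summary()
# 'Int64Index: 3 entries, 1 to 3'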
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@ | Appender(_index_shared_docs['_convert_slice_indexer']) | pandas.util._decorators.Appender |
import math
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn import svm
def get_past_midfielders():
data = pd.read_csv('../resources/merged.csv', sep=',', encoding='utf-8', index_col=0)
model = data[['player_id', 'name', 'season', 'pos', 'round', 'team_rank', 'opponent_team_rank', 'team_pot', 'opp_pot',
'concede_pot', 'opp_concede_pot', 'prev_points', 'form_points', 'total_points',
'long_form', 'ict_form']]
MidfielderModal = model.loc[model['pos'] == 'Midfielder']
MidfielderModal.drop('pos', axis=1, inplace=True)
MidfielderModal.sort_values(['season', 'round'], ascending=True, inplace=True)
MidfielderModal.to_csv('../resources/predictions/MIDFIELDERS.csv', sep=',', encoding='utf-8')
players = MidfielderModal[8587:]
keys = MidfielderModal['round']
values = pd.cut(MidfielderModal['round'], 3, labels=[1, 2, 3])
dictionary = dict(zip(keys, values))
MidfielderModal['round'] = values
X = MidfielderModal.drop(['total_points', 'season', 'player_id', 'name'], axis=1)
y = MidfielderModal[['total_points']]
X_train = X[:8586]
X_test = X[8587:]
y_train = y[:8586]
y_test = y[8587:]
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
score = regression_model.score(X_test, y_test)
y_pred = regression_model.predict(X_test)
testing = pd.concat([X_test, y_test], axis=1)
testing['Predicted'] = np.round(y_pred, 1)
testing['Prediction_Error'] = testing['total_points'] - testing['Predicted']
testing['player_id'] = 0
testing['name'] = 0
testing['player_id'] = players.player_id
testing['name'] = players.name
testing['round'] = 34
testing.to_csv('../resources/past/34_MIDS.csv', sep=',', encoding='utf-8')
# get_past_midfielders()
def merge():
one = pd.read_csv('../resources/predictions/30FOR.csv', sep=',', encoding='utf-8', index_col=0)
two = pd.read_csv('../resources/predictions/31FOR.csv', sep=',', encoding='utf-8', index_col=0)
three = pd.read_csv('../resources/predictions/32FOR.csv', sep=',', encoding='utf-8', index_col=0)
four = pd.read_csv('../resources/predictions/33FOR.csv', sep=',', encoding='utf-8', index_col=0)
five = pd.read_csv('../resources/predictions/34FOR.csv', sep=',', encoding='utf-8', index_col=0)
dfarray = [one, two, three, four, five]
MergedData = pd.concat(dfarray)
MergedData['pos'] = 'Forward'
MergedData['save_form'] = 0
MergedData = MergedData[['player_id', 'name', 'pos', 'Predicted', 'total_points', 'Prediction_Error', 'round', 'concede_pot',
'opp_pot', 'team_rank', 'opponent_team_rank', 'form_points', 'ict_form',
'save_form', 'prev_points']]
MergedData.to_csv('../resources/predictions/forwards_gw30.csv', sep=',', encoding='utf-8')
merge()
def get_defenders():
data = pd.read_csv('../resources/merged.csv', sep=',', encoding='utf-8', index_col=0)
model = data[
['player_id', 'name', 'season', 'pos', 'round', 'team_rank', 'opponent_team_rank', 'team_pot', 'opp_pot',
'concede_pot', 'opp_concede_pot', 'prev_points', 'form_points', 'total_points',
'long_form', 'ict_form']]
DefenderModal = model.loc[model['pos'] == 'Defender']
DefenderModal.drop('pos', axis=1, inplace=True)
DefenderModal.sort_values(['season', 'round'], ascending=True, inplace=True)
DefenderModal.to_csv('../resources/predictions/DEFENDERS.csv', sep=',', encoding='utf-8')
players = DefenderModal[6228:]
keys = DefenderModal['round']
values = | pd.cut(DefenderModal['round'], 3, labels=[1, 2, 3]) | pandas.cut |
# This is a test file intended to be used with pytest
# pytest automatically runs all the function starting with "test_"
# see https://docs.pytest.org for more information
import math
import os
import sys
import numpy as np
import pandas as pd
## Add stuff to the path to enable exec outside of DSS
plugin_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.join(plugin_root, 'python-lib'))
import dku_timeseries
JUST_BEFORE_SPRING_DST = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
JUST_BEFORE_FALL_DST = pd.Timestamp('20191027 02:59:00').tz_localize('CET',
ambiguous=True) # It's ambiguous because there are 2 instants with these dates! We
# select the first
TIME_COL = 'time_col'
DATA_COL = 'data_col'
GROUP_COL = 'group_col'
### Helpers to create test data, should be fixtures at some point I guess
def _make_df_with_one_col(column_data, period=pd.DateOffset(seconds=1), start_time=JUST_BEFORE_SPRING_DST):
time = pd.date_range(start_time, None, len(column_data), period)
df = pd.DataFrame({TIME_COL: time, DATA_COL: column_data})
return df
def _make_window_aggregator_params(window_width=1):
params = dku_timeseries.WindowAggregatorParams(window_width=window_width)
return params
def _make_window_aggregator(window_width=1):
params = _make_window_aggregator_params(window_width)
return dku_timeseries.WindowAggregator(params)
### Test cases
class TestWindowing:
def test_empty_df(self):
df = _make_df_with_one_col([])
window_aggregator = _make_window_aggregator()
output_df = window_aggregator.compute(df, TIME_COL)
assert output_df.shape == (0, 2)
def test_single_row_df(self):
df = _make_df_with_one_col([33])
window_aggregator = _make_window_aggregator()
output_df = window_aggregator.compute(df, TIME_COL)
assert output_df.shape == (1, 2)
assert output_df[DATA_COL][0] == df[DATA_COL][0]
def test_two_rows_df(self):
length = 2
data = [x for x in range(length)]
df = _make_df_with_one_col(data)
window_aggregator = _make_window_aggregator()
output_df = window_aggregator.compute(df, TIME_COL)
assert output_df[DATA_COL + '_min'][1] == 0
def test_incremental_df_left_closed(self):
length = 100
data = [x for x in range(length)]
df = _make_df_with_one_col(data)
print(df.shape)
params = dku_timeseries.WindowAggregatorParams(window_width=3, closed_option='left')
window_aggregator = dku_timeseries.WindowAggregator(params)
output_df = window_aggregator.compute(df, TIME_COL)
ground_truth = [np.NaN, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]
assert math.isnan(output_df[DATA_COL + '_min'][0])
for x, y in zip(output_df[DATA_COL + '_min'][1:], ground_truth[1:]):
assert output_df[DATA_COL][x] == y
def test_incremental_df_right_closed(self):
length = 100
data = [x for x in range(length)]
df = _make_df_with_one_col(data)
print(df.shape)
params = dku_timeseries.WindowAggregatorParams(window_width=3, closed_option='right', window_type='gaussian')
window_aggregator = dku_timeseries.WindowAggregator(params)
output_df = window_aggregator.compute(df, TIME_COL)
ground_truth = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8]
for x, y in zip(output_df[DATA_COL + '_min'][1:], ground_truth[1:]):
assert output_df[DATA_COL][x] == y
def test_group_window_time_unit(self):
start_time_1 = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
start_time_2 = pd.Timestamp('20190131 02:00:00').tz_localize('CET')
start_time_list = [start_time_1, start_time_2]
len1 = 100
len2 = 10
data1 = range(len1)
data2 = range(len2)
data_list = [data1, data2]
period1 = | pd.DateOffset(seconds=1) | pandas.DateOffset |
"""
This script cleans the data
"""
import json
import lightgbm as lgb
import numpy as np
import pandas as pd
from scipy.signal import savgol_filter as sg
from sklearn.feature_selection import RFECV
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
import shap
from auxiliary import week_of_month, BlockingTimeSeriesSplit
from config import DATA_CONSUMPTION_PROCESSED_FILE, INTERVENTION_CALENDAR, \
DATA_WEATHER_PROCESSED_FILE, BEST_FEATURES_FILE, ALL_NUMERICAL_FEATURES,ALL_CATEGORICAL_FEATURES, \
DATA_VACATIONS_INTERVENTION_FILE, DATA_METADATA_PROCESSED_FILE, DATA_HOLIDAYS_PROCESSED_FILE, \
DATA_ISO_CONSUMPTION_PROCESSED_FILE, DATA_ENTHALPY_GRADIENTS_PROCESSED_FILE, DATA_VACATIONS_FILE, \
DATA_SOLAR_GAINS_PROCESSED_FILE, DYNAMIC_SPACE, EXPERIMENTS, CONTROL_GROUPS, BEST_PARAMETERS_FILE
from custom_scorer_module import scorer_quantile, scorer_rmse
def lgbm_regression_efecto_acumulado_con_linea_base_del_experimento(alpha,
data_mean,
get_best_parameters=False,
get_best_features=False,
use_best_features=False):
# INITIALIZE NEW FIELDS
numerical_features_list = ALL_NUMERICAL_FEATURES
categorical_features_list = ALL_CATEGORICAL_FEATURES
new_field = "GBM_consumption_kWh_" + alpha
data_mean[new_field] = 0.0
for experiment, control in zip(EXPERIMENTS, CONTROL_GROUPS):
# GET DATA ABOUT PERIODS OF INTERVENTION OF THE EXPERIMENTAL PERIOD
intervention_data = INTERVENTION_CALENDAR[experiment]
pre_period = intervention_data[1]
post_period = intervention_data[2]
range_pre_intervention_period = | pd.date_range(start=pre_period[0], end=pre_period[1], freq='D') | pandas.date_range |
import numpy as np
import pandas as pd
from numpy import nan
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pytest
from test_pvsystem import sam_data
from conftest import requires_scipy
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_'].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture()
def location():
return Location(32.2, -111, altitude=700)
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
@pytest.mark.parametrize('strategy, expected', [
(None, (32.2, 180)), ('None', (32.2, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the || accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194760203, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
@requires_scipy
def test_run_model_with_weather(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'wind_speed':5, 'temp_air':10}, index=times)
ac = mc.run_model(times, weather=weather).ac
expected = pd.Series(np.array([ 201.691634921, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
@requires_scipy
def test_run_model_tracker(system, location):
system = SingleAxisTracker(module_parameters=system.module_parameters,
inverter_parameters=system.inverter_parameters)
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([119.067713606, nan]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
expect = pd.DataFrame(np.
array([[ 54.82513187, 90. , 11.0039221 , 11.0039221 ],
[ nan, 0. , 0. , nan]]),
columns=['aoi', 'surface_azimuth', 'surface_tilt', 'tracker_theta'],
index=times)
expect = expect[['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt']]
assert_frame_equal(mc.tracking, expect, check_less_precise=2)
def poadc(mc):
mc.dc = mc.total_irrad['poa_global'] * 0.2
mc.dc.name = None # assert_series_equal will fail without this
@requires_scipy
@pytest.mark.parametrize('dc_model, expected', [
('sapm', [181.604438144, -2.00000000e-02]),
('singlediode', [181.044109596, -2.00000000e-02]),
('pvwatts', [190.028186986, 0]),
(poadc, [189.183065667, 0]) # user supplied function
])
def test_dc_models(system, cec_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system,
location, dc_model, expected):
dc_systems = {'sapm': system, 'singlediode': cec_dc_snl_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
poadc: pvwatts_dc_pvwatts_ac_system}
system = dc_systems[dc_model]
mc = ModelChain(system, location, dc_model=dc_model,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
@requires_scipy
@pytest.mark.parametrize('dc_model', ['sapm', 'singlediode', 'pvwatts_dc'])
def test_infer_dc_model(system, cec_dc_snl_ac_system,
pvwatts_dc_pvwatts_ac_system, location, dc_model,
mocker):
dc_systems = {'sapm': system, 'singlediode': cec_dc_snl_ac_system,
'pvwatts_dc': pvwatts_dc_pvwatts_ac_system}
system = dc_systems[dc_model]
m = mocker.spy(system, dc_model)
mc = ModelChain(system, location,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
mc.run_model(times)
assert m.call_count == 1
assert isinstance(mc.dc, (pd.Series, pd.DataFrame))
def acdc(mc):
mc.ac = mc.dc
@requires_scipy
@pytest.mark.parametrize('ac_model, expected', [
('snlinverter', [181.604438144, -2.00000000e-02]),
('adrinverter', [np.nan, -25.00000000e-02]),
('pvwatts', [190.028186986, 0]),
(acdc, [199.845296258, 0]) # user supplied function
])
def test_ac_models(system, cec_dc_adr_ac_system, pvwatts_dc_pvwatts_ac_system,
location, ac_model, expected):
ac_systems = {'snlinverter': system, 'adrinverter': cec_dc_adr_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
acdc: pvwatts_dc_pvwatts_ac_system}
system = ac_systems[ac_model]
mc = ModelChain(system, location, ac_model=ac_model,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def constant_aoi_loss(mc):
mc.aoi_modifier = 0.9
@requires_scipy
@pytest.mark.parametrize('aoi_model, expected', [
('sapm', [182.784057666, -2.00000000e-02]),
('ashrae', [180.825930547, -2.00000000e-02]),
('physical', [181.453077805, -2.00000000e-02]),
('no_loss', [181.604438144, -2.00000000e-02]),
(constant_aoi_loss, [164.997043305, -2e-2])
])
def test_aoi_models(system, location, aoi_model, expected):
mc = ModelChain(system, location, dc_model='sapm',
aoi_model=aoi_model, spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
import strax
import pandas as pd
import mongomock
import numpy as np
from datetime import datetime
from pandas._testing import assert_frame_equal
import pytz
import pymongo
import pytest
import unittest
@mongomock.patch()
def corrections():
dummy_client = pymongo.MongoClient()
cmt = strax.CorrectionsInterface(client=dummy_client)
return cmt
def make_dummy_df():
"""
Make a dummy pandas.dataframe()
"""
dates = [datetime(2017, 1, 1), datetime(2021, 1, 1), datetime(2021, 9, 23)]
df = pd.DataFrame({'ONLINE' : [10.,10., 8.],
'v1' : [12., 12., 14.],
'v2' : [13., 14., np.nan],
'time': dates})
df['time'] = pd.to_datetime(df['time'], utc=True)
df = df.set_index('time')
return df
def test_db():
cmt = corrections()
df = make_dummy_df()
# write to the DB
cmt.write('test_db', df)
# read from the DB
df2 = cmt.read('test_db')
# pandas.DataFrame should be identical
assert_frame_equal(df, df2)
'''
Author: <NAME>
Create Time: 2021-10-14 19:35:38
Copyright: Copyright (c) 2021 <NAME>. See LICENSE for details
'''
from OpenFlows.Domain.ModelingElements.NetworkElements import IActiveElementsInput, IBaseLinksInput, IBasePolygonInput, IBasePolygonsInput, INetworkElements, IPointNodesInput
from OpenFlows.Water.Domain import IWaterModel
from OpenFlows.Water.Domain.ModelingElements.NetworkElements import IBaseDirectedNodesInput, IBaseNodesInput, IBasePumpsInput, IBaseValvesInput, ICheckValveElementsInput, IConventionalTanksInput, ICustomerMetersInput, IDemandNodesInput, IFireFlowNodesInput, IFlowControlValvesInput, IGeneralPurposeValves, IGeneralPurposeValvesInput, IHydrantsInput, IHydroTanksInput, IIsolationValveElementsInput, IJunctionsInput, ILateralsInput, IPhysicalNodeElementsInput, IPipes, IPressureBreakingValves, IPressureBreakingValvesInput, IPressureSustainingValvesInput, IPressureValvesInput, IPumpStations, IPumpStationsInput, IPumps, IPumpsInput, IReservoirs, ISCADAElements, ISCADAElementsInput, ITanks, ITanksInput, ITaps, IThrottleControlValvesInput, IVSPBsInput, IWaterQualityElementsInput, IWaterQualityNodesInput, IWaterZoneableNetworkElementsInput
import numpy as np
import pandas as pd
import networkx as nx
from typing import Any, List, Type
class NetworkInput:
# region Fields
__waterModel: IWaterModel
# endregion
# region Constructor
def __init__(self, water_model: IWaterModel) -> None:
self.__waterModel = water_model
pass
def __repr__(self) -> str:
line1 = f"{__class__.__name__}: {self.__waterModel}."
line2 = f"Pipe Count: {self.__waterModel.Network.Pipes.Count}"
line3 = f"Junction Count: {self.__waterModel.Network.Junctions.Count}"
return f"{line1} {line2} {line3}."
# endregion
# region Public Methods
def get_networkx_graph(self, laterals: bool = False) -> nx.Graph:
columns = ["Id", "Label"] if laterals else [
"Id", "Label", "Diameter", "IsActive"]
links_df = self.pipe_df[columns].copy()
if laterals:
links_df.append(self.lateral_df[columns])
graph: nx.Graph = nx.from_pandas_edgelist(
df=links_df,
source="StartNodeId",
target="StopNodeId",
edge_attr=columns)
return graph
# endregion // Public Methods
# region Public Properties (Network Elements DF)
@property
def pipe_df(self) -> pd.DataFrame:
return self.__get_pipe_input(self.__waterModel.Network.Pipes)
@property
def lateral_df(self) -> pd.DataFrame:
return self.__get_lateral_input(self.__waterModel.Network.Laterals)
@property
def junction_df(self) -> pd.DataFrame:
return self.__get_junction_input(self.__waterModel.Network.Junctions)
@property
def hydrant_df(self) -> pd.DataFrame:
return self.__get_hydrant_input(self.__waterModel.Network.Hydrants)
@property
def tank_df(self) -> pd.DataFrame:
return self.__get_tank_input(self.__waterModel.Network.Tanks)
@property
def reservoir_df(self) -> pd.DataFrame:
return self.__get_reservoir_input(self.__waterModel.Network.Reservoirs)
@property
def tap_df(self) -> pd.DataFrame:
return self.__get_tap_input(self.__waterModel.Network.Taps)
@property
def pump_df(self) -> pd.DataFrame:
return self.__get_pump_input(self.__waterModel.Network.Pumps)
@property
def pump_stn_df(self) -> pd.DataFrame:
return self.__get_pump_stn_input(self.__waterModel.Network.PumpStations)
@property
def customer_meter_df(self) -> pd.DataFrame:
return self.__get_customer_meter_input(self.__waterModel.Network.CustomerMeters)
@property
def scada_elem_df(self) -> pd.DataFrame:
return self.__get_scada_elem_input(self.__waterModel.Network.SCADAElements)
@property
def vspb_df(self) -> pd.DataFrame:
return self.__get_vspb_input(self.__waterModel.Network.VSPBs)
@property
def prv_df(self) -> pd.DataFrame:
return self.__get_prv_input(self.__waterModel.Network.PRVs)
@property
def psv_df(self) -> pd.DataFrame:
return self.__get_psv_input(self.__waterModel.Network.PSVs)
@property
def pbv_df(self) -> pd.DataFrame:
return self.__get_pbv_input(self.__waterModel.Network.PBVs)
@property
def fcv_df(self) -> pd.DataFrame:
return self.__get_fcv_input(self.__waterModel.Network.FCVs)
@property
def tcv_df(self) -> pd.DataFrame:
return self.__get_tcv_input(self.__waterModel.Network.TCVs)
@property
def gpv_df(self) -> pd.DataFrame:
return self.__get_gpv_input(self.__waterModel.Network.GPVs)
@property
def iso_valve_df(self) -> pd.DataFrame:
return self.__get_iso_valve_input(self.__waterModel.Network.IsolationValves)
@property
def hydro_tank_df(self) -> pd.DataFrame:
return self.__get_hydro_tank_input(self.__waterModel.Network.HydropneumaticTanks)
@property
def check_valve_df(self) -> pd.DataFrame:
return self.__get_check_valve_input(self.__waterModel.Network.CheckValves)
# endregion // Public Properties
# region Private methods
def __dict_to_value(self, series: pd.Series, data_type: Type) -> pd.Series:
series = series.apply(lambda d: d.Value)
if data_type:
if data_type is str:
series = series.astype("string")
else:
series = series.astype(data_type)
return series
def __get_elements_input(self, elements: INetworkElements) -> pd.DataFrame:
df = pd.DataFrame()
df["Label"] = elements.Labels()
df["Id"] = df["Label"].apply(lambda d: d.Key).astype(pd.Int64Dtype())
df["Label"] = df["Label"].apply(lambda d: d.Value).astype("string")
return df
def __get_physical_elevation_input(self, elements: IPhysicalNodeElementsInput, df: pd.DataFrame) -> pd.DataFrame:
df["Elevation"] = elements.Elevations()
df["Elevation"] = self.__dict_to_value(df["Elevation"], float)
return df
def __get_active_elements_input(self, elements: IActiveElementsInput, df: pd.DataFrame) -> pd.DataFrame:
df["IsActive"] = elements.IsActives()
df["IsActive"] = self.__dict_to_value(df["IsActive"], bool)
return df
def __get_zone_elements_input(self, elements: IWaterZoneableNetworkElementsInput, df: pd.DataFrame) -> pd.DataFrame:
df["Zone"] = elements.Zones()
df["Zone"] = self.__dict_to_value(df["Zone"], None)
df["ZoneId"] = df["Zone"].apply(lambda z: z.Id if z else None)
df["ZoneLabel"] = df["Zone"].apply(
lambda z: z.Label if z else None).astype("string")
return df
def __get_point_node_input(self, elements: IPointNodesInput, df: pd.DataFrame) -> pd.DataFrame:
df["Geometry"] = elements.Geometries()
df["Geometry"] = self.__dict_to_value(df["Geometry"], None)
x_and_y: List[Any] = df["Geometry"].apply(
lambda p: [p.X, p.Y]).tolist()
if x_and_y: # TODO: find the type of x&y
df[["X", "Y"]] = x_and_y
else:
df["X"] = None
df["Y"] = None
return df
def __get_polygons_geometry(self, elements: IBasePolygonsInput, df: pd.DataFrame) -> pd.DataFrame:
df["Geometry"] = elements.Geometries()
df["Geometry"] = self.__dict_to_value(df["Geometry"], None)
df["Geometry"] = df["Geometry"].apply(
lambda pts: [[p.X, p.Y] for p in pts]).tolist()
return df
def __get_water_quality_node_input(self, elements: IWaterQualityElementsInput, df: pd.DataFrame) -> pd.DataFrame:
df["InitAge"] = elements.InitialAge()
df["InitAge"] = self.__dict_to_value(df["InitAge"], float)
df["InitConc"] = elements.InitialConcentration()
df["InitConc"] = self.__dict_to_value(df["InitConc"], float)
df["InitTrace"] = elements.InitialTrace()
df["InitTrace"] = self.__dict_to_value(df["InitTrace"], float)
return df
def __get_installation_year_input(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame:
df["InstallYr"] = elements.InstallationYears()
df["InstallYr"] = self.__dict_to_value(
df["InstallYr"], pd.Int64Dtype())
return df
def __get_minor_loss_node_input(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame:
df["dMLossCoeff"] = elements.DerivedMinorLossCoefficient()
df["dMLossCoeff"] = self.__dict_to_value(df["dMLossCoeff"], float)
df["IsLocalMLoss"] = elements.SpecifyLocalMinorLoss()
df["IsLocalMLoss"] = self.__dict_to_value(df["IsLocalMLoss"], bool)
df["LocalMLossCoeff"] = elements.LocalMinorLossCoefficient()
df["LocalMLossCoeff"] = self.__dict_to_value(
df["LocalMLossCoeff"], float)
return df
def __get_valve_characerstics_input(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame:
df["ValveChrsts"] = elements.ValveCharacteristics()
df["ValveChrsts"] = self.__dict_to_value(df["ValveChrsts"], None)
return df
def __get_hammer_valve_type_input(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame:
df["ValveType"] = elements.ValveTypes()
df["ValveType"] = self.__dict_to_value(
df["ValveType"], pd.Int64Dtype())
return df
def __get_demand_node_input(self, elements: IDemandNodesInput) -> pd.DataFrame:
df = self.__get_base_node_input(elements)
return df
def __get_fire_node_input(self, elements: IFireFlowNodesInput) -> pd.DataFrame:
df = self.__get_demand_node_input(elements)
return df
# region Base Node / Link / Polygon Inputs
def __get_base_node_input(self, elements: IBaseNodesInput) -> pd.DataFrame:
df = self.__get_elements_input(elements)
df = self.__get_physical_elevation_input(elements, df)
df = self.__get_active_elements_input(elements, df)
df = self.__get_zone_elements_input(elements, df)
df = self.__get_water_quality_node_input(elements, df)
df = self.__get_point_node_input(elements, df)
return df
def __get_base_link_input(self, elements: IBaseLinksInput) -> pd.DataFrame:
df = pd.DataFrame()
df = self.__get_elements_input(elements)
df = self.__get_active_elements_input(elements, df)
df["StartNode"] = elements.StartNodes()
df["StartNode"] = self.__dict_to_value(df["StartNode"], None)
df["StartNodeId"] = df["StartNode"].apply(
lambda n: n.Id).astype(pd.Int64Dtype())
df["StopNode"] = elements.StopNodes()
df["StopNode"] = self.__dict_to_value(df["StopNode"], None)
df["StopNodeId"] = df["StopNode"].apply(
lambda n: n.Id).astype(pd.Int64Dtype())
df["IsUDLength"] = elements.IsUserDefinedLengths()
df["IsUDLength"] = self.__dict_to_value(df["IsUDLength"], bool)
df["Length"] = elements.Lengths()
df["Length"] = self.__dict_to_value(df["Length"], float)
df["Geometry"] = elements.Geometries()
df["Geometry"] = self.__dict_to_value(df["Geometry"], None)
return df
def __get_base_polygon_input(self, elements: IBasePolygonsInput) -> pd.DataFrame:
df = pd.DataFrame()
df = self.__get_elements_input(elements)
df = self.__get_active_elements_input(elements, df)
df = self.__get_polygons_geometry(elements, df)
return df
# endregion
def __get_associated_elements(self, elements: Any, df: pd.DataFrame) -> pd.DataFrame:
df["AssocElem"] = elements.AssociatedElements()
df["AssocElem"] = self.__dict_to_value(df["AssocElem"], None)
df["AssocElemId"] = df["AssocElem"].apply(
lambda n: n.Id if n else None).astype(pd.Int64Dtype())
return df
# region Base Elements Input
def __get_base_directed_node_input(self, elements: IBaseDirectedNodesInput) -> pd.DataFrame:
df = self.__get_base_node_input(elements)
df = self.__get_installation_year_input(elements, df)
return df
def __get_base_pump_node_input(self, elements: IPumpsInput) -> pd.DataFrame:
df = self.__get_base_directed_node_input(elements)
df["InitSpeedFactor"] = elements.InitialRelativeSpeedFactors()
df["InitSpeedFactor"] = self.__dict_to_value(
df["InitSpeedFactor"], float)
df["InitStatus"] = elements.InitialStatus()
df["InitStatus"] = self.__dict_to_value(df["InitStatus"], bool)
return df
def __get_base_valve_node_input(self, elements: IBaseValvesInput) -> pd.DataFrame:
df = self.__get_base_directed_node_input(elements)
df = self.__get_minor_loss_node_input(elements, df)
df["InitStatus"] = elements.InitialStatus()
df["InitStatus"] = self.__dict_to_value(df["InitStatus"], None)
df["Diameter"] = elements.Diameters()
df["Diameter"] = self.__dict_to_value(df["Diameter"], float)
return df
def __get_base_tank_node_input(self, elements: IConventionalTanksInput) -> pd.DataFrame:
df = self.__get_demand_node_input(elements)
return df
def __get_conventional_tank_node_input(self, elements: IConventionalTanksInput) -> pd.DataFrame:
df = self.__get_demand_node_input(elements)
df = self.__get_water_quality_node_input(elements, df)
df["SectionType"] = elements.TankSection()
df["SectionType"] = self.__dict_to_value(
df["SectionType"], None)
df["ActiveVolFull"] = elements.ActiveVolumeFull()
df["ActiveVolFull"] = self.__dict_to_value(
df["ActiveVolFull"], float)
df["Diameter"] = elements.Diameter()
df["Diameter"] = self.__dict_to_value(
df["Diameter"], float)
df["AvgArea"] = elements.AverageArea()
df["AvgArea"] = self.__dict_to_value(
df["AvgArea"], float)
df["BaseElev"] = elements.BaseElevation()
df["BaseElev"] = self.__dict_to_value(
df["BaseElev"], float)
df["MinLevel"] = elements.MinimumLevel()
df["MinLevel"] = self.__dict_to_value(
df["MinLevel"], float)
df["MaxLevel"] = elements.MaximumLevel()
df["MaxLevel"] = self.__dict_to_value(
df["MaxLevel"], float)
df["InitLevel"] = elements.InitialLevel()
df["InitLevel"] = self.__dict_to_value(
df["InitLevel"], float)
df["UseHighAlarm"] = elements.UseHighAlarm()
df["UseHighAlarm"] = self.__dict_to_value(
df["UseHighAlarm"], bool)
df["HighAlarmLvl"] = elements.HighAlarmLevel()
df["HighAlarmLvl"] = self.__dict_to_value(
df["HighAlarmLvl"], float)
df["UseLowAlarm"] = elements.UseLowAlarm()
df["UseLowAlarm"] = self.__dict_to_value(
df["UseLowAlarm"], bool)
df["LowAlarmLvl"] = elements.LowAlarmLevel()
df["LowAlarmLvl"] = self.__dict_to_value(
df["LowAlarmLvl"], float)
df["InactiveVol"] = elements.InactiveVolume()
df["InactiveVol"] = self.__dict_to_value(
df["InactiveVol"], float)
return df
def __get_base_pressure_valve_node_input(self, elements: IPressureValvesInput) -> pd.DataFrame:
df = self.__get_base_valve_node_input(elements)
df["PressureSettings"] = elements.PressureValveSettings()
df["PressureSettings"] = self.__dict_to_value(
df["PressureSettings"], float)
df["InitSetting"] = elements.InitialSettings()
df["InitSetting"] = self.__dict_to_value(df["InitSetting"], None)
return df
def __get_general_purpose_valve_node_input(self, elements: IGeneralPurposeValvesInput) -> pd.DataFrame:
df = self.__get_base_valve_node_input(elements)
df["GpvHlCurve"] = elements.GPVHeadlossCurves()
df["GpvHlCurve"] = self.__dict_to_value(df["GpvHlCurve"], None)
df["ValveChrsts"] = elements.ValveCharacteristics()
df["ValveChrsts"] = self.__dict_to_value(df["ValveChrsts"], None)
return df
def __get_tank_input(self, elements: ITanksInput) -> pd.DataFrame:
df = self.__get_conventional_tank_node_input(elements)
df = self.__get_valve_characerstics_input(elements, df)
df = self.__get_hammer_valve_type_input(elements, df)
return df
def __get_hydro_tank_input(self, elements: IHydroTanksInput) -> pd.DataFrame:
df = self.__get_base_tank_node_input(elements)
df["InitGasVol"] = elements.InitialVolumeOfGas()
df["InitGasVol"] = self.__dict_to_value(df["InitGasVol"], float)
df["InletOrifDia"] = elements.TankInletOrificeDiameter()
df["InletOrifDia"] = self.__dict_to_value(df["InletOrifDia"], float)
df["RatioOfLosses"] = elements.RatioOfLosses()
df["RatioOfLosses"] = self.__dict_to_value(df["RatioOfLosses"], float)
df["GasLawExponent"] = elements.GasLawExponent()
df["GasLawExponent"] = self.__dict_to_value(
df["GasLawExponent"], float)
df["HasBladder"] = elements.HasBladder()
df["HasBladder"] = self.__dict_to_value(df["HasBladder"], bool)
df["GasPresetPressure"] = elements.GasPresetPressure()
df["GasPresetPressure"] = self.__dict_to_value(
df["GasPresetPressure"], float)
df["MeanLqdElev"] = elements.MeanLiquidElevation()
df["MeanLqdElev"] = self.__dict_to_value(df["MeanLqdElev"], float)
df["AirInOrifDia"] = elements.AirInflowOrificeDiameter()
df["AirInOrifDia"] = self.__dict_to_value(df["AirInOrifDia"], float)
df["AirOutOrifDia"] = elements.AirOutflowOrificeDiameter()
df["AirOutOrifDia"] = self.__dict_to_value(df["AirOutOrifDia"], float)
df["DippingTubeDia"] = elements.DippingTubeDiameter()
df["DippingTubeDia"] = self.__dict_to_value(
df["DippingTubeDia"], float)
df["CompChamberVol"] = elements.CompressionChamberVolume()
df["CompChamberVol"] = self.__dict_to_value(
df["CompChamberVol"], float)
df["TopElevDippingTube"] = elements.TopElevationDippingTube()
df["TopElevDippingTube"] = self.__dict_to_value(
df["TopElevDippingTube"], float)
df["LevelType"] = elements.LevelType()
df["LevelType"] = self.__dict_to_value(df["LevelType"], None)
df["HydroTankType"] = elements.HydroTankType()
df["HydroTankType"] = self.__dict_to_value(df["HydroTankType"], None)
# df["AirOutOrifDia"] = elements.AirOutflowOrificeDiameter()
# df["AirOutOrifDia"] = self.__dict_to_value(df["AirOutOrifDia"], float)
# df["DippingTubeDia"] = elements.DippingTubeDiameter()
# df["DippingTubeDia"] = self.__dict_to_value(df["DippingTubeDia"], float)
# df["CompChamberVol"] = elements.CompressionChamberVolume()
# df["CompChamberVol"] = self.__dict_to_value(df["CompChamberVol"], float)
# df["TopElevDippingTube"] = elements.TopElevationDippingTube()
# df["TopElevDippingTube"] = self.__dict_to_value(df["TopElevDippingTube"], float)
return df
def __get_reservoir_input(self, elements: IReservoirs) -> pd.DataFrame:
df = self.__get_base_node_input(elements)
return df
def __get_tap_input(self, elements: ITaps) -> pd.DataFrame:
df = self.__get_elements_input(elements)
df = self.__get_point_node_input(elements, df)
df = self.__get_associated_elements(elements, df)
return df
# endregion
# region Pipes/Laterals
def __get_pipe_input(self, elements: IPipes) -> pd.DataFrame:
df = self.__get_base_link_input(elements)
df = self.__get_installation_year_input(elements, df)
df["Status"] = elements.Input.PipeStatuses()
df["Status"] = self.__dict_to_value(df["Status"], bool)
df["Diameter"] = elements.Input.Diameters()
df["Diameter"] = self.__dict_to_value(df["Diameter"], float)
df["Material"] = elements.Input.Materials()
df["Material"] = self.__dict_to_value(df["Material"], str)
df["FrictionCoeff"] = elements.Input.FrictionCoefficients()
df["FrictionCoeff"] = self.__dict_to_value(df["FrictionCoeff"], float)
return df
def __get_lateral_input(self, elements: ILateralsInput) -> pd.DataFrame:
df = self.__get_base_link_input(elements)
return df
# endregion
# region Fireflow Nodes
def __get_junction_input(self, elements: IJunctionsInput) -> pd.DataFrame:
df = self.__get_fire_node_input(elements)
return df
def __get_hydrant_input(self, elements: IHydrantsInput) -> pd.DataFrame:
df = self.__get_fire_node_input(elements)
return df
# endregion
# region Pumps / Pump Stations / VSPB
def __get_pump_input(self, elements: IPumpsInput) -> pd.DataFrame:
df = self.__get_base_directed_node_input(elements)
df["InitSpeedFactor"] = elements.InitialRelativeSpeedFactors()
df["InitSpeedFactor"] = self.__dict_to_value(
df["InitSpeedFactor"], float)
df["InitStatus"] = elements.InitialStatus()
df["InitStatus"] = self.__dict_to_value(
df["InitStatus"], pd.Int64Dtype())
# TODO: double check the fields
return df
def __get_pump_stn_input(self, elements: IPumpStationsInput) -> pd.DataFrame:
df = self.__get_base_polygon_input(elements)
return df
def __get_vspb_input(self, elements: IVSPBsInput) -> pd.DataFrame:
df = self.__get_base_pump_node_input(elements)
df["PumpDefinition"] = elements.PumpDefinitions()
df["PumpDefinition"] = self.__dict_to_value(
df["PumpDefinition"], None)
df["PumpDefinitionId"] = df["PumpDefinition"].apply(
lambda p: p.Id if p else None).astype(pd.Int64Dtype())
df["ControlNode"] = elements.ControlNodes()
df["ControlNode"] = self.__dict_to_value(
df["ControlNode"], None)
df["ControlNodeId"] = df["ControlNode"].apply(
lambda p: p.Id if p else None).astype(pd.Int64Dtype())
df["TgtHGL"] = elements.TargetHydraulicGrades()
df["TgtHGL"] = self.__dict_to_value(
df["TgtHGL"], float)
df["MaxSpeedFactor"] = elements.MaximumRelativeSpeedFactors()
df["MaxSpeedFactor"] = self.__dict_to_value(
df["MaxSpeedFactor"], float)
df["NumLagPumps"] = elements.NumberOfLagPumps()
df["NumLagPumps"] = self.__dict_to_value(
df["NumLagPumps"], | pd.Int64Dtype() | pandas.Int64Dtype |
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file contains layer linting functions.
"""
from collections import OrderedDict
import pandas as pd
from .activations import create_activations
from .engine_plan import EnginePlan
class ConvLinter():
"""Convolution layer linter."""
def __init__(self, plan: EnginePlan):
self.plan = plan
self.convs = plan.get_layers_by_type('Convolution')
def tc_lint(self):
"""Search for Convolutions which are not accelerated by TensorCode"""
def is_small_conv(conv):
inputs, _ = create_activations(conv)
n, c, h, w = inputs[0].shape
return c < 32
report = OrderedDict()
# Look for kernels that are not scheduled for xmma (TensorCore
# acceleration)
tc_candidates = self.convs.query(f"precision != \"FP32\"").copy()
# Identify acceleration from tactic name
df = tc_candidates
df = df[df['tactic'].str.contains("imma|hmma|xmma|i88|884", na=False) == False]
for index, conv in df.iterrows():
mitigation = ""
if is_small_conv(conv):
mitigation = "This Convolution has a small number " \
"of input channels so acceleration may not be possible."
report[conv.Name] = OrderedDict({
'name': conv.Name,
'tactic': conv.tactic,
'subtype': conv.subtype,
'hazard': "Convolution is not accelerated.",
'mitigation': mitigation,
'help': "TensorCores accelerate large Convolution and GEMM operations."
})
return report
def mixed_precision_lint(self):
"""Search for Convolutions with Int8 inputs and Float outputs"""
report = OrderedDict()
df = self.convs
df = df.loc[df['precision'] == 'INT8'].copy()
for index, conv in df.iterrows():
inputs, outputs = create_activations(conv)
inf = inputs[0].format[:4]
outf = outputs[0].format[:4]
found = inf == 'Int8' and outf != 'Int8'
if found:
report[conv.Name] = OrderedDict({
'name': conv.Name,
'tactic': conv.tactic,
'subtype': conv.subtype,
'hazard': "Quantized Convolution has float outputs.",
'mitigation': "Consider adding quantization after the convolution.",
'help': "Quantized Convolution with float outputs is ill advised "
"for memory-limited convolutions."
})
return report
def lint(self):
report = self.tc_lint()
report.update(self.mixed_precision_lint())
df = pd.DataFrame.from_dict(report, orient='index')
return df
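# Illustrative usage (editor's note, not part of the original module): given an
# EnginePlan instance `plan`, each linter produces a hazards table, e.g.
#   hazards_df = ConvLinter(plan).lint()   # one pandas row per flagged convolution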
class ReformatLinter():
"""Reformat layer linter."""
def __init__(self, plan: EnginePlan):
self.plan = plan
self.reformats = plan.get_layers_by_type('Reformat')
def lint(self):
"""Search for conversions between types.
Conversions between layouts are assumed to be optimized."""
report = OrderedDict()
for index, reformat in self.reformats.iterrows():
inputs, outputs = create_activations(reformat)
inf = inputs[0].format[:4]
outf = outputs[0].format[:4]
if inf != outf:
mitigation = ""
if "INT8" in [inf, outf]:
mitigation = "Consider adding quantization around float operations."
report[reformat.Name] = OrderedDict({
'name': reformat.Name,
'origin': reformat['attr.origin'],
'type conversion': f"{inf} -> {outf}",
'shape conversion': f"{inputs[0].shape} -> {outputs[0].shape}",
'hazard': "Reformat layer is converting operand data type.",
'mitigation': mitigation,
'help': "Conversions between float32 and float16 are a red "
"flag, as are conversions between float32/16 and INT8."
})
df = pd.DataFrame.from_dict(report, orient='index')
return df
class SliceLinter():
"""Slice layer linter."""
def __init__(self, plan: EnginePlan):
self.plan = plan
self.slices = plan.get_layers_by_type('Slice')
def lint(self):
"""Search for conversions between types.
Conversions between layouts are assumed to be optimized."""
report = OrderedDict()
for index, slice in self.slices.iterrows():
inputs, outputs = create_activations(slice)
inf = inputs[0].format[:4]
outf = outputs[0].format[:4]
if inf != outf:
mitigation = ""
if "INT8" in [inf, outf]:
mitigation = "Consider adding quantization around float operations."
report[slice.Name] = OrderedDict({
'name': slice.Name,
'type conversion': f"{inf} -> {outf}",
'shape conversion': f"{inputs[0].shape} -> {outputs[0].shape}",
'hazard': "Slice layer is converting operand data type.",
'mitigation': mitigation,
'help': "Conversions between float32 and float16 are a red "
"flag, as are conversions between float32/16 <=> INT8."
})
df = pd.DataFrame.from_dict(report, orient='index')
return df
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved.
"""
import pandas as pd
import pytest
from ..metrics.diversity import Diversity
# Evaluate the constructor
@pytest.mark.parametrize('attr, error',
[(['', 0.9, 0.9, ['a', 'b'], 2], ValueError),
(['age', -0.9, 0.9, ['a', 'b'], 2], ValueError),
(['age', 1.1, 0.9, ['a', 'b'], 2], ValueError),
(['age', 0.9, -0.9, ['a', 'b'], 2], ValueError),
(['age', 0.9, 1.1, ['a', 'b'], 2], ValueError),
(['age', 0.9, 0.9, ['a', 'b'], 2.2], TypeError),
(['age', 0.9, 0.9, ['a', 'b'], -2], ValueError),
(['age', 0.9, 0.9, [], 2], ValueError)])
def test_constructor_errors(attr, error):
with pytest.raises(error):
Diversity(*attr)
# Evaluate _compute_hunter_gaston
div_age = Diversity('age', 1., 1., ['20s', '30s'], 2)
@pytest.mark.parametrize('counts, error',
[({}, ValueError),
({'20s': 1}, ValueError),
({'20s': 0, '30s': 0}, ValueError),
({'20s': 0, '30s': -1}, ValueError),
({'20s': 0, '30s': 3.1}, ValueError)])
def test_compute_hunter_gaston_errors(counts, error):
with pytest.raises(error):
div_age._compute_hunter_gaston(counts)
@pytest.mark.parametrize('counts, value',
[({'20s': 5, '30s': 5}, 0.56),
({'20s': 5, '30s': 0}, 0.),
({'20s': 1, '30s': 1}, 1.)])
def test_compute_hunter_gaston_values(counts, value):
assert pytest.approx(div_age._compute_hunter_gaston(counts), 0.01) == value
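# Editor's note (illustrative sketch, not part of the library under test): the
# expected values above are consistent with the Hunter-Gaston diversity index,
#     HG = 1 - sum(n_i * (n_i - 1)) / (N * (N - 1)),  N = sum(n_i),
# e.g. counts {'20s': 5, '30s': 5} give 1 - 40/90 = 0.555... ~= 0.56 as asserted.
def _hunter_gaston_reference(counts):
    """Reference computation of the Hunter-Gaston index from category counts."""
    n_total = sum(counts.values())
    same_pairs = sum(n * (n - 1) for n in counts.values())
    return 1.0 - same_pairs / (n_total * (n_total - 1))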
# Evaluate _get_counts
emps = pd.DataFrame(columns=['age'], data=['20s', '30s', '20s', '20s', '20s', '30s'])
@pytest.mark.parametrize('data, error',
[(pd.DataFrame(), ValueError),
(['20s', '30s'], TypeError),
(pd.DataFrame(columns=['bla'], data=['a', 'b']), ValueError),
(pd.DataFrame(columns=['age'], data=['20s', 'b']), ValueError)])
def test_get_counts_errors(data, error):
with pytest.raises(error):
div_age._get_counts(data)
@pytest.mark.parametrize('data, expected',
[(emps, {'20s': 4, '30s': 2}),
(pd.DataFrame(columns=['age'], data=['20s', '20s', '20s', '20s']), {'20s': 4, '30s': 0})])
def test_get_counts_value(data, expected):
assert div_age._get_counts(data) == expected
# Evaluate _compute_uniform_counts
@pytest.mark.parametrize('counts, error',
[(['bla'], TypeError),
({}, ValueError),
({'a': 0, 'b': 0}, ValueError)])
def test_compute_uniform_counts_errors(counts, error):
with pytest.raises(error):
div_age._compute_uniform_counts(counts)
@pytest.mark.parametrize('counts, value',
[({'A': 4, 'B': 2}, {'A': 3, 'B': 3}),
({'A': 1, 'B': 0}, {'A': 1, 'B': 0}),
({'A': 10, 'B': 0}, {'A': 5, 'B': 5}),
({'A': 10, 'B': 1}, {'A': 6, 'B': 5})])
def test_compute_uniform_counts_values(counts, value):
assert div_age._compute_uniform_counts(counts) == value
# Evaluate compute
@pytest.mark.parametrize('data, error',
[([], ValueError),
(['bla'], TypeError),
(pd.DataFrame(columns=['A'], data=[1]), ValueError)])
def test_compute_errors(data, error):
with pytest.raises(error):
div_age.compute(data)
@pytest.mark.parametrize('data, value, obj',
[(['20s', '30s', '20s', '20s', '20s', '30s'], 0.88, False),
(['20s', '20s', '20s'], 0, False),
(['20s', '30s', '30s', '20s', '20s', '30s'], 1, True)])
def test_compute_values(data, value, obj):
emps = pd.DataFrame(columns=['age'], data=data)
div, check = div_age.compute(emps)
assert pytest.approx(div, 0.001) == value
assert check == obj
# Evaluate distance
@pytest.mark.parametrize('data, error',
[([], ValueError),
(['bla'], TypeError)])
def test_distance_errors(data, error):
with pytest.raises(error):
div_age.distance(data)
@pytest.mark.parametrize('data, expected, value',
[(['20s', '30s', '20s', '20s', '20s', '30s'], {'20s': 0, '30s': 1}, 1),
(['20s', '20s', '20s'], {'20s': 0, '30s': 2}, 1),
(['20s', '30s', '30s', '20s', '20s', '30s'], {'20s': 0, '30s': 0}, 1)])
def test_distance_values(data, expected, value):
emps = pd.DataFrame(columns=['age'], data=data)
increments, new_div = div_age.distance(emps)
assert pytest.approx(new_div, 0.001) == value
assert increments == expected
# Evaluate difficulty
@pytest.mark.parametrize('init, dist, final, error',
[(-1, {}, 0.5, ValueError),
(1.5, {}, 0.5, ValueError),
(0.5, {}, -0.5, ValueError),
(0.5, {}, 1.5, ValueError),
(0.5, {'A': 1}, 0.5, ValueError),
(0.8, {'A': 1}, 0.5, ValueError),
(0.5, {'A': -1}, 0.5, ValueError)])
def test_difficulty_errors(init, dist, final, error):
with pytest.raises(error):
div_age.difficulty(init, dist, final)
@pytest.mark.parametrize('init, dist, final, value',
[(0, {'A': 1}, 1, 1),
(0.9, {'A': 10}, 0.91, 1000)])
def test_difficulty_values(init, dist, final, value):
assert pytest.approx(div_age.difficulty(init, dist, final), 0.01) == value
# Evaluate visualise
@pytest.mark.parametrize('data, error',
[(pd.DataFrame(),
from reprobench.core.db import ParameterGroup, Run, db
from reprobench.executors.db import RunStatistic
from reprobench.utils import import_class
from .base import PandasExporter
try:
import pandas as pd
except ImportError:
pass
class RunTable(PandasExporter):
@classmethod
def get_dataframe(cls, config):
joins = config.get("joins", [])
query = Run.select()
for model_class in joins:
model = import_class(model_class)
query = query.join_from(Run, model).select_extend(
*model._meta.fields.values()
)
sql, params = query.sql()
return pd.read_sql_query(sql, db, params=params)
class RunSummaryTable(PandasExporter):
DEFAULT_COLUMNS = ("cpu_time", "wall_time", "max_memory")
@classmethod
def get_dataframe(cls, config):
columns = config.get("columns", cls.DEFAULT_COLUMNS)
tool_names = [
f"{group.tool_id}_{group.name}" for group in ParameterGroup.select()
]
multiindex = pd.MultiIndex.from_product((tool_names, columns))
df = pd.DataFrame(index=multiindex).transpose()
for group in ParameterGroup.select():
tool_name = f"{group.tool_id}_{group.name}"
query = (
RunStatistic.select()
.join(Run)
.where(Run.tool_id == group.tool_id)
.where(Run.parameter_group_id == group.id)
)
sql, params = query.sql()
tool_df = pd.read_sql(sql, db, params=params)
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 12:22:06 2019
@author: YASH
"""
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
import csv
import pandas as pd
import numpy
#Data cleaning Functions:
def isEnglish(s):
try:
s.encode('ascii')
except UnicodeEncodeError:
return False
else:
return True
#The following function removes the part of the string that contains the substring eg. if
#substring = 'http' , then http://www.google.com is removed, that means, remove until a space is found
def rem_substring(tweets,substring):
m=0;
for i in tweets:
if (substring in i):
#while i.find(substring)!=-1:
k=i.find(substring)
d=i.find(' ',k,len(i))
if d!=-1: #substring is present somewhere in the middle (not at the end of the string)
i=i[:k]+i[d:]
else: #special case when the substring is present at the end, we needn't append the
i=i[:k] #substring after the junk string to our result
tweets[m]=i #store the result in tweets "list"
m+= 1
return tweets
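# Illustrative usage (editor's sketch, not in the original script): with
# substring='http', everything from the first 'http' up to the next space is
# dropped from each tweet, which strips URLs as in the example below.
if __name__ == "__main__":
    _demo = rem_substring(["check this http://t.co/abc out"], "http")
    assert _demo == ["check this  out"]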
def removeNonEnglish(tweets):
result=[]
for i in tweets:
if isEnglish(i):
result.append(i)
return result
#the following function converts all the text to the lower case
def lower_case(tweets):
result=[]
for i in tweets:
result.append(i.lower())
return result
def rem_punctuation(tweets):
#print(len(tweets))
m=0
validLetters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ "
for i in tweets:
x = ""
for j in i:
if (j in validLetters)==True:
x += j
tweets[m]=x
m=m+1
return tweets
def stop_words(tweets):
#Removal of Stop words like is, am , be, are, was etc.
stop_words1 = set(stopwords.words('english'))
indi=0
for tweet in tweets:
new_s=[]
Br_tweet = word_tokenize(tweet)
for word in Br_tweet:
if (word not in stop_words1):
new_s.append(word)
et=" ".join(new_s)
tweets[indi]=et
indi+=1
return tweets
def score(college_name):
filename = 'data_emotions_words_list.csv'
pos_file_name= "Pos_tagged_" + college_name + ".csv"
POS=pd.read_csv(pos_file_name)
POS_tweets=POS['POS_Tweet'].values
adverb1=pd.read_csv("adverb.csv")
verb1=pd.read_csv("verb.csv")
''' Verb and adverb are dictionaries having values for verbs and adverbs'''
verb={};adverb={}
l=adverb1['value'].values
j=0
for i in adverb1['adverb'].values:
adverb[i]=l[j]
j+=1
l=verb1['Value'].values
j=0
for i in verb1['Verb'].values:
verb[i]=l[j]
j+=1
''' Add the adjectives in the dictionary'''
Adjectives={}
df = pd.read_csv("data_emotions_words_list.csv")
import argparse
import numpy as np
import os
import pandas as pd
import pdb
def add_dataset_info(results_raw: pd.DataFrame, task_metadata: pd.DataFrame):
results_raw['tid'] = [int(x.split('/')[-1]) for x in results_raw['id']]
task_metadata['ClassRatio'] = task_metadata['MinorityClassSize'] / task_metadata['NumberOfInstances']
results_raw = results_raw.merge(task_metadata, on=['tid'])
return results_raw
def mean_score(df: pd.DataFrame, column: str = 'result'):
return round(df[column].mean(), 4)
def filter_type(df: pd.DataFrame):
return df[df['type'] == 'binary'].append(df[df['type'] == 'multiclass']) #df[df['type'] == 'regression']
def filter_samples(df, samples=100000, lower=True):
return df[df['NumberOfInstances'] < samples] if lower else df[df['NumberOfInstances'] >= samples]
def filter_features(df, features=100, lower=True):
return df[df['NumberOfFeatures'] < features] if lower else df[df['NumberOfFeatures'] >= features]
def filter_duration(df, duration=40000):
return df[df['duration'] < duration]
def compute_win_lose(base, tie, pseudo_label):
total = max(len(base) + len(pseudo_label) + len(tie), 1)
return round((len(pseudo_label) + 0.5 * len(tie)) / total, 4), round((len(base) + 0.5 * len(tie)) / total, 4)
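# Worked example (editor's note): ties count half for each side, so with 3
# pseudo-label wins, 1 tie and 2 base wins over 6 folds,
#   compute_win_lose(base=['t1', 't2'], tie=['t3'], pseudo_label=['t4', 't5', 't6'])
# returns (0.5833, 0.4167).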
def print_inference_speedup(df1, df2):
relative_speedups = []
absolute_speedups = []
for task in df1['task'].unique():
df1_rows = df1[df1["task"] == task]
df2_rows = df2[df2["task"] == task]
for fold in df1_rows["fold"].unique():
row1, row2 = df1_rows[df1_rows["fold"] == fold], df2_rows[df2_rows["fold"] == fold]
if len(row1) == 0 or len(row2) == 0 or row1['predict_duration'].isna().item() or row2['predict_duration'].isna().item():
continue
df1_time, df2_time = row1['predict_duration'].item(), row2['predict_duration'].item()
if df1_time == 0 or df2_time == 0:
continue
relative_speedups.append((df1_time - df2_time)/min(df1_time, df2_time))
absolute_speedups.append(df1_time - df2_time)
print(f"Average Relative Speedup: {round(np.mean(relative_speedups), 4)}, Average Absolute Speedup: {round(np.mean(absolute_speedups), 4)}")
def compare_dfs_improvement(df1, df2):
metric = "acc"
binary, multiclass, regression = [], [], []
for task in df1['task'].unique():
df1_rows = df1[df1["task"] == task]
df2_rows = df2[df2["task"] == task]
for fold in df1_rows["fold"].unique():
row1, row2 = df1_rows[df1_rows["fold"] == fold], df2_rows[df2_rows["fold"] == fold]
if len(row1) == 0 or len(row2) == 0 or row1[metric].isna().item() or row2[metric].isna().item():
continue
df1_score, df2_score = row1[metric].item(), row2[metric].item()
problem_type = df1_rows.iloc[0]['type']
try:
if problem_type == "binary":
score = (df2_score - df1_score) / df1_score if df1_score > df2_score else (df2_score - df1_score) / df2_score
binary.append(score)
elif problem_type == "multiclass":
score = (df1_score - df2_score) / df1_score if df1_score < df2_score else (df1_score - df2_score) / df2_score
multiclass.append(score)
else:
score = (df1_score - df2_score) / df1_score if df1_score < df2_score else (df1_score - df2_score) / df2_score
regression.append(score)
except:
pass
binary_improvement = round(np.mean(binary), 4)
multiclass_improvement = round(np.mean(multiclass), 4)
regression_improvement = round(np.mean(regression), 4)
total_improvement = round(np.mean(binary + multiclass + regression), 4)
return total_improvement, binary_improvement, multiclass_improvement, regression_improvement
def compare_dfs(df1, df2, grouped=False):
df1_better, equal_performance, df2_better = [], [], []
metric = "acc"
for task in df1['task'].unique():
df1_rows = df1[df1["task"] == task]
df2_rows = df2[df2["task"] == task]
if grouped:
if len(df1_rows) > 0:
df1_score = df1_rows[metric].dropna().mean()
if df1_score != df1_score:
continue
else:
continue
if len(df2_rows) > 0:
df2_score = df2_rows[metric].dropna().mean()
if df2_score != df2_score:
continue
else:
continue
if df1_score > df2_score:
df1_better.append(task)
elif df1_score < df2_score:
df2_better.append(task)
else:
equal_performance.append(task)
else:
for fold in df1_rows["fold"].unique():
row1, row2 = df1_rows[df1_rows["fold"] == fold], df2_rows[df2_rows["fold"] == fold]
if len(row1) == 0 or len(row2) == 0 or row1[metric].isna().item() or row2[metric].isna().item():
continue
score1, score2 = row1[metric].item(), row2[metric].item()
if score1 > score2:
df1_better.append(task+f"_{fold}")
elif score1 < score2:
df2_better.append(task+f"_{fold}")
else:
equal_performance.append(task+f"_{fold}")
return df1_better, equal_performance, df2_better
def print_miscellaneous(df1, df2):
metric = "acc"
score_diffs = []
for task in df1['task'].unique():
df1_rows = df1[df1["task"] == task]
df2_rows = df2[df2["task"] == task]
if len(df1_rows) > 0:
df1_score = df1_rows[metric].dropna().mean()
if df1_score != df1_score:
continue
else:
continue
if len(df2_rows) > 0:
df2_score = df2_rows[metric].dropna().mean()
if df2_score != df2_score:
continue
else:
continue
problem_type = df1_rows.iloc[0]['type']
if problem_type == "binary":
score = (df2_score - df1_score) / df1_score if df1_score > df2_score else (df2_score - df1_score) / df2_score
elif problem_type == "multiclass":
score = (df1_score - df2_score) / df1_score if df1_score < df2_score else (df1_score - df2_score) / df2_score
else:
score = (df1_score - df2_score) / df1_score if df1_score < df2_score else (df1_score - df2_score) / df2_score
score_diffs.append((task, score))
score_diffs = sorted(score_diffs, key=lambda info: info[1])
score_diffs = [diff[1] for diff in score_diffs]
print(f"Relative Error Reduction Info: {round(np.mean(score_diffs), 4)} ± {round(np.std(score_diffs), 4)}, ({round(score_diffs[0], 4)}, {round(score_diffs[-1], 4)})")
lower_quantile, upper_quantile = np.quantile(score_diffs, 0.025), np.quantile(score_diffs, 0.975)
score_diffs = [diff for diff in score_diffs if lower_quantile < diff < upper_quantile]
print(f"Relative Error Reduction Info (mean ± 2 * sigma): {round(np.mean(score_diffs), 4)} ± {round(np.std(score_diffs), 4)}, ({round(score_diffs[0], 4)}, {round(score_diffs[-1], 4)})")
print(f"Number of Errored Runs (Base/pseudo_label): {len(base[~base['info'].isna()])}/{len(pseudo_label[~pseudo_label['info'].isna()])}")
def print_automl_comparisons(base: pd.DataFrame, pseudo_label: pd.DataFrame, others: pd.DataFrame):
print("==============================================================================")
rows = []
for framework in others['framework'].unique():
other = others[others['framework'] == framework]
first_better, equal_performance, second_better = compare_dfs(other, base)
base_win, base_lose = compute_win_lose(first_better, equal_performance, second_better)
first_better, equal_performance, second_better = compare_dfs(other, pseudo_label)
pseudo_label_win, pseudo_label_lose = compute_win_lose(first_better, equal_performance, second_better)
base_improvement, _, _, _ = compare_dfs_improvement(other, base)
pseudo_label_improvement, _, _, _ = compare_dfs_improvement(other, pseudo_label)
rows.append({'Framework': framework, 'Base Win Rate': base_win, 'pseudo_label Win Rate': pseudo_label_win,
'Base Error Reduction': base_improvement, 'pseudo_label Error Reduction': pseudo_label_improvement,
'Win Rate Improvement': pseudo_label_win - base_win, 'Error Reduction Improvement': pseudo_label_improvement - base_improvement})
df = pd.DataFrame(rows)
import pandas as pd
from liuy.Interface.BaseSampler import BaseSampler
from liuy.utils.ComputeLoss import LiuyComputeLoss
import copy
import re
def name2id(losses_name):
losses_id = []
for item in losses_name:
loss = {}
loss['loss_mask'] = item['loss_mask']
digit = re.findall(r"\d+\d*", item['file_name'])
img_id = digit[2]
img_id = int(img_id)
# img_id = str(img_id)
loss['image_id'] = img_id
losses_id.append(loss)
return losses_id
def decrease_sort_losses(losses):
if len(losses) <= 1:
return losses
pivot = losses[len(losses) // 2]
left = [x for x in losses if x['loss_mask'] > pivot['loss_mask']]
middle = [x for x in losses if x['loss_mask'] == pivot['loss_mask']]
right = [x for x in losses if x['loss_mask'] < pivot['loss_mask']]
return decrease_sort_losses(left) + middle + decrease_sort_losses(right)
def increase_sort_losses(losses):
if len(losses) <= 1:
return losses
pivot = losses[len(losses) // 2]
left = [x for x in losses if x['loss_mask'] < pivot['loss_mask']]
middle = [x for x in losses if x['loss_mask'] == pivot['loss_mask']]
right = [x for x in losses if x['loss_mask'] > pivot['loss_mask']]
return increase_sort_losses(left) + middle + increase_sort_losses(right)
class LossSampler():
def __init__(self, sampler_name):
self.sampler_name = sampler_name
def group_image(self, image2class):
"""
:param image2class: an ndarray of shape m * 2, where m is the number of rows (one image per row);
the first column is the image id, and the second is the class id
:return: list of lists, each containing the image ids that share one class
"""
image2class_pd = pd.DataFrame(image2class)
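# Editor's sketch of one possible continuation (the original body is truncated in
# this excerpt): group the image ids (column 0) by class id (column 1), e.g.
#   return [group[0].tolist() for _, group in image2class_pd.groupby(1)]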
import abc
import re
from abc import ABCMeta, abstractmethod, abstractproperty
from datetime import timedelta
from time import sleep
import numpy as np
import pandas as pd
from catalyst.assets._assets import TradingPair
from logbook import Logger
from catalyst.data.data_portal import BASE_FIELDS
from catalyst.exchange.bundle_utils import get_start_dt, \
get_delta, get_periods, get_adj_dates
from catalyst.exchange.exchange_bundle import ExchangeBundle
from catalyst.exchange.exchange_errors import MismatchingBaseCurrencies, \
InvalidOrderStyle, BaseCurrencyNotFoundError, SymbolNotFoundOnExchange, \
InvalidHistoryFrequencyError, MismatchingFrequencyError, \
BundleNotFoundError, NoDataAvailableOnExchange, PricingDataNotLoadedError
from catalyst.exchange.exchange_execution import ExchangeStopLimitOrder, \
ExchangeLimitOrder, ExchangeStopOrder
from catalyst.exchange.exchange_portfolio import ExchangePortfolio
from catalyst.exchange.exchange_utils import get_exchange_symbols
from catalyst.finance.order import ORDER_STATUS
from catalyst.finance.transaction import Transaction
from catalyst.constants import LOG_LEVEL
log = Logger('Exchange', level=LOG_LEVEL)
class Exchange:
__metaclass__ = ABCMeta
def __init__(self):
self.name = None
self.assets = {}
self._portfolio = None
self.minute_writer = None
self.minute_reader = None
self.base_currency = None
self.num_candles_limit = None
self.max_requests_per_minute = None
self.request_cpt = None
self.bundle = ExchangeBundle(self)
@property
def positions(self):
return self.portfolio.positions
@property
def portfolio(self):
"""
Return the Portfolio
:return:
"""
if self._portfolio is None:
self._portfolio = ExchangePortfolio(
start_date=pd.Timestamp.utcnow()
)
self.synchronize_portfolio()
return self._portfolio
@abstractproperty
def account(self):
pass
@abstractproperty
def time_skew(self):
pass
def ask_request(self):
"""
Asks permission to issue a request to the exchange.
The primary purpose is to avoid hitting rate limits.
The application will pause if the maximum requests per minute
permitted by the exchange is exceeded.
:return boolean:
"""
now = pd.Timestamp.utcnow()
if not self.request_cpt:
self.request_cpt = dict()
self.request_cpt[now] = 0
return True
cpt_date = list(self.request_cpt.keys())[0]  # dict_keys is not indexable in Python 3
cpt = self.request_cpt[cpt_date]
if now > cpt_date + timedelta(minutes=1):
self.request_cpt = dict()
self.request_cpt[now] = 0
return True
if cpt >= self.max_requests_per_minute:
delta = now - cpt_date
sleep_period = 60 - delta.total_seconds()
sleep(sleep_period)
now = pd.Timestamp.utcnow()
self.request_cpt = dict()
self.request_cpt[now] = 0
return True
else:
self.request_cpt[cpt_date] += 1
def get_symbol(self, asset):
"""
Get the exchange specific symbol of the given asset.
:param asset: Asset
:return: symbol: str
"""
symbol = None
for key in self.assets:
if not symbol and self.assets[key].symbol == asset.symbol:
symbol = key
if not symbol:
raise ValueError('Currency %s not supported by exchange %s' %
(asset.symbol, self.name.title()))
return symbol
def get_symbols(self, assets):
"""
Get a list of symbols corresponding to each given asset.
:param assets: Asset[]
:return:
"""
symbols = []
for asset in assets:
symbols.append(self.get_symbol(asset))
return symbols
def get_assets(self, symbols=None):
assets = []
if symbols is not None:
for symbol in symbols:
asset = self.get_asset(symbol)
assets.append(asset)
else:
for key in self.assets:
assets.append(self.assets[key])
return assets
def get_asset(self, symbol):
"""
Find an Asset on the current exchange based on its Catalyst symbol
:param symbol: the [target]_[base] currency pair symbol
:return: Asset
"""
asset = None
for key in self.assets:
if not asset and self.assets[key].symbol.lower() == symbol.lower():
asset = self.assets[key]
if not asset:
supported_symbols = [pair.symbol.encode('utf-8') for pair in
self.assets.values()]
raise SymbolNotFoundOnExchange(
symbol=symbol,
exchange=self.name.title(),
supported_symbols=supported_symbols
)
return asset
def fetch_symbol_map(self):
return get_exchange_symbols(self.name)
def load_assets(self):
"""
Populate the 'assets' attribute with a dictionary of Assets.
The key of the resulting dictionary is the exchange specific
currency pair symbol. The universal symbol is contained in the
'symbol' attribute of each asset.
Notes
-----
The sid of each asset is calculated based on a numeric hash of the
universal symbol. This simple approach avoids maintaining a mapping
of sids.
This method can be overridden if an exchange offers equivalent data
via its api.
"""
symbol_map = self.fetch_symbol_map()
for exchange_symbol in symbol_map:
asset = symbol_map[exchange_symbol]
if 'start_date' in asset:
start_date = pd.to_datetime(asset['start_date'], utc=True)
else:
start_date = None
if 'end_date' in asset:
end_date = pd.to_datetime(asset['end_date'], utc=True)
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import os
import platform
import string
import unittest
from copy import deepcopy
from itertools import product
import numpy as np
import pandas as pd
from numba.core.errors import TypingError
from sdc.hiframes.rolling import supported_rolling_funcs
from sdc.tests.test_base import TestCase
from sdc.tests.test_series import gen_frand_array
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
skip_numba_jit, skip_sdc_jit,
test_global_input_data_float64)
LONG_TEST = (int(os.environ['SDC_LONG_ROLLING_TEST']) != 0
if 'SDC_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
def rolling_std_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).std(ddof)
def rolling_var_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).var(ddof)
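# Editor's note: `ddof` is forwarded to pandas, i.e. the rolling variance is
# sum((x - mean)**2) / (n_obs - ddof) over each window and std is its square root;
# for example, pd.Series([1., 2., 3.]).rolling(3).var(ddof=1) ends with 1.0.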
class TestRolling(TestCase):
@skip_numba_jit
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = self.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@skip_numba_jit
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed_apply2(self):
# test sequentially with generated dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_parallel1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).sum()
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_fixed_parallel_apply1(self):
def test_impl(n, w, center):
df = pd.DataFrame({'B': np.arange(n)})
R = df.rolling(w, center=center).apply(lambda a: a.sum())
return R.B.sum()
hpat_func = self.jit(test_impl)
sizes = (121,)
wins = (5,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 4, 5, 10, 11)
centers = (False, True)
for args in itertools.product(sizes, wins, centers):
self.assertEqual(hpat_func(*args), test_impl(*args),
"rolling fixed window with {}".format(args))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_variable1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
# XXX: skipping min/max for this test since the behavior of Pandas
# is inconsistent: it assigns NaN to last output instead of 4!
if func_name not in ('min', 'max'):
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
sizes = (1, 2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').{}()\n".format(w, func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_variable_apply1(self):
# test sequentially with manually created dfs
df1 = pd.DataFrame({'B': [0, 1, 2, np.nan, 4],
'time': [pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:05'),
pd.Timestamp('20130101 09:00:06')]})
df2 = pd.DataFrame({'B': [0, 1, 2, -2, 4],
'time': [pd.Timestamp('20130101 09:00:01'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03'),
pd.Timestamp('20130101 09:00:04'),
pd.Timestamp('20130101 09:00:09')]})
wins = ('2s',)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# apply only
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
pd.testing.assert_frame_equal(hpat_func(df1), test_impl(df1))
pd.testing.assert_frame_equal(hpat_func(df2), test_impl(df2))
@skip_numba_jit
def test_variable_apply2(self):
# test sequentially with generated dfs
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# TODO: this crashes on Travis (3 process config) with size 1
sizes = (2, 10, 11, 121, 1000)
# apply only
for w in wins:
func_text = "def test_impl(df):\n return df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
time = pd.date_range(start='1/1/2018', periods=n, freq='s')
df = pd.DataFrame({'B': np.arange(n), 'time': time})
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
@unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
def test_variable_parallel1(self):
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# XXX: Pandas returns time = [np.nan] for size==1 for some reason
sizes = (2, 10, 11, 121, 1000)
# all functions except apply
for w, func_name in itertools.product(wins, test_funcs):
func_text = "def test_impl(n):\n"
func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
func_text += " res = df.rolling('{}', on='time').{}()\n".format(w, func_name)
func_text += " return res.B.sum()\n"
loc_vars = {}
exec(func_text, {'pd': pd, 'np': np}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
@unittest.skipIf(platform.system() == 'Windows', "ValueError: time must be monotonic")
def test_variable_apply_parallel1(self):
wins = ('2s',)
sizes = (121,)
if LONG_TEST:
wins = ('1s', '2s', '3s', '4s')
# XXX: Pandas returns time = [np.nan] for size==1 for some reason
sizes = (2, 10, 11, 121, 1000)
# apply only
for w in wins:
func_text = "def test_impl(n):\n"
func_text += " df = pd.DataFrame({'B': np.arange(n), 'time': "
func_text += " pd.DatetimeIndex(np.arange(n) * 1000000000)})\n"
func_text += " res = df.rolling('{}', on='time').apply(lambda a: a.sum())\n".format(w)
func_text += " return res.B.sum()\n"
loc_vars = {}
exec(func_text, {'pd': pd, 'np': np}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n in sizes:
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_series_fixed1(self):
# test series rolling functions
# all functions except apply
S1 = pd.Series([0, 1, 2, np.nan, 4])
S2 = pd.Series([0, 1, 2, -2, 4])
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(S, w, c):\n return S.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
pd.testing.assert_series_equal(hpat_func(S1, *args), test_impl(S1, *args))
pd.testing.assert_series_equal(hpat_func(S2, *args), test_impl(S2, *args))
# test apply
def apply_test_impl(S, w, c):
return S.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(apply_test_impl)
for args in itertools.product(wins, centers):
pd.testing.assert_series_equal(hpat_func(S1, *args), apply_test_impl(S1, *args))
pd.testing.assert_series_equal(hpat_func(S2, *args), apply_test_impl(S2, *args))
@skip_numba_jit
def test_series_cov1(self):
# test Series.rolling cov() and corr()
S1 = pd.Series([0, 1, 2, np.nan, 4])
S2 = pd.Series([0, 1, 2, -2, 4])
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
def test_impl(S, S2, w, c):
return S.rolling(w, center=c).cov(S2)
hpat_func = self.jit(test_impl)
for args in itertools.product([S1, S2], [S1, S2], wins, centers):
pd.testing.assert_series_equal(hpat_func(*args), test_impl(*args))
pd.testing.assert_series_equal(hpat_func(*args), test_impl(*args))
def test_impl2(S, S2, w, c):
return S.rolling(w, center=c).corr(S2)
hpat_func = self.jit(test_impl2)
for args in itertools.product([S1, S2], [S1, S2], wins, centers):
pd.testing.assert_series_equal(hpat_func(*args), test_impl2(*args))
pd.testing.assert_series_equal(hpat_func(*args), test_impl2(*args))
@skip_numba_jit
def test_df_cov1(self):
# test DataFrame.rolling cov() and corr()
df1 = pd.DataFrame({'A': [0, 1, 2, np.nan, 4], 'B': np.ones(5)})
df2 = pd.DataFrame({'A': [0, 1, 2, -2, 4], 'C': np.ones(5)})
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
def test_impl(df, df2, w, c):
return df.rolling(w, center=c).cov(df2)
hpat_func = self.jit(test_impl)
for args in itertools.product([df1, df2], [df1, df2], wins, centers):
pd.testing.assert_frame_equal(hpat_func(*args), test_impl(*args))
pd.testing.assert_frame_equal(hpat_func(*args), test_impl(*args))
def test_impl2(df, df2, w, c):
return df.rolling(w, center=c).corr(df2)
hpat_func = self.jit(test_impl2)
for args in itertools.product([df1, df2], [df1, df2], wins, centers):
pd.testing.assert_frame_equal(hpat_func(*args), test_impl2(*args))
pd.testing.assert_frame_equal(hpat_func(*args), test_impl2(*args))
def _get_assert_equal(self, obj):
if isinstance(obj, pd.Series):
return pd.testing.assert_series_equal
elif isinstance(obj, pd.DataFrame):
return pd.testing.assert_frame_equal
elif isinstance(obj, np.ndarray):
return np.testing.assert_array_equal
return self.assertEqual
def _test_rolling_unsupported_values(self, obj):
def test_impl(obj, window, min_periods, center,
win_type, on, axis, closed):
return obj.rolling(window, min_periods, center,
win_type, on, axis, closed).min()
hpat_func = self.jit(test_impl)
with self.assertRaises(ValueError) as raises:
hpat_func(obj, -1, None, False, None, None, 0, None)
self.assertIn('window must be non-negative', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, -1, False, None, None, 0, None)
self.assertIn('min_periods must be >= 0', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, 2, False, None, None, 0, None)
self.assertIn('min_periods must be <= window', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, 2, False, None, None, 0, None)
self.assertIn('min_periods must be <= window', str(raises.exception))
msg_tmpl = 'Method rolling(). The object {}\n expected: {}'
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, True, None, None, 0, None)
msg = msg_tmpl.format('center', 'False')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, False, 'None', None, 0, None)
msg = msg_tmpl.format('win_type', 'None')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, False, None, 'None', 0, None)
msg = msg_tmpl.format('on', 'None')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, False, None, None, 1, None)
msg = msg_tmpl.format('axis', '0')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 1, None, False, None, None, 0, 'None')
msg = msg_tmpl.format('closed', 'None')
self.assertIn(msg, str(raises.exception))
def _test_rolling_unsupported_types(self, obj):
def test_impl(obj, window, min_periods, center,
win_type, on, axis, closed):
return obj.rolling(window, min_periods, center,
win_type, on, axis, closed).min()
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling(). The object {}\n given: {}\n expected: {}'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, '1', None, False, None, None, 0, None)
msg = msg_tmpl.format('window', 'unicode_type', 'int')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, '1', False, None, None, 0, None)
msg = msg_tmpl.format('min_periods', 'unicode_type', 'None, int')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, 0, None, None, 0, None)
msg = msg_tmpl.format('center', 'int64', 'bool')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, False, -1, None, 0, None)
msg = msg_tmpl.format('win_type', 'int64', 'str')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, False, None, -1, 0, None)
msg = msg_tmpl.format('on', 'int64', 'str')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, False, None, None, None, None)
msg = msg_tmpl.format('axis', 'none', 'int, str')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, None, False, None, None, 0, -1)
msg = msg_tmpl.format('closed', 'int64', 'str')
self.assertIn(msg, str(raises.exception))
def _test_rolling_apply_mean(self, obj):
def test_impl(obj, window, min_periods):
def func(x):
if len(x) == 0:
return np.nan
return x.mean()
return obj.rolling(window, min_periods).apply(func)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window + 1, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_apply_unsupported_types(self, obj):
def test_impl(obj, raw):
def func(x):
if len(x) == 0:
return np.nan
return np.median(x)
return obj.rolling(3).apply(func, raw=raw)
hpat_func = self.jit(test_impl)
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1)
msg = 'Method rolling.apply(). The object raw\n given: int64\n expected: bool'
self.assertIn(msg, str(raises.exception))
def _test_rolling_apply_args(self, obj):
def test_impl(obj, window, min_periods, q):
def func(x, q):
if len(x) == 0:
return np.nan
return np.quantile(x, q)
return obj.rolling(window, min_periods).apply(func, raw=None, args=(q,))
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window + 1, 2):
for q in [0.25, 0.5, 0.75]:
with self.subTest(obj=obj, window=window,
min_periods=min_periods, q=q):
jit_result = hpat_func(obj, window, min_periods, q)
ref_result = test_impl(obj, window, min_periods, q)
assert_equal(jit_result, ref_result)
def _test_rolling_corr(self, obj, other):
def test_impl(obj, window, min_periods, other):
return obj.rolling(window, min_periods).corr(other)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window, 2):
with self.subTest(obj=obj, other=other,
window=window, min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods, other)
ref_result = test_impl(obj, window, min_periods, other)
assert_equal(jit_result, ref_result)
def _test_rolling_corr_with_no_other(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).corr(pairwise=False)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_corr_unsupported_types(self, obj):
def test_impl(obj, pairwise):
return obj.rolling(3, 3).corr(pairwise=pairwise)
hpat_func = self.jit(test_impl)
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1)
msg = 'Method rolling.corr(). The object pairwise\n given: int64\n expected: bool'
self.assertIn(msg, str(raises.exception))
def _test_rolling_count(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).count()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window + 1, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_cov(self, obj, other):
def test_impl(obj, window, min_periods, other, ddof):
return obj.rolling(window, min_periods).cov(other, ddof=ddof)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods, ddof in product(range(0, window, 2), [0, 1]):
with self.subTest(obj=obj, other=other, window=window,
min_periods=min_periods, ddof=ddof):
jit_result = hpat_func(obj, window, min_periods, other, ddof)
ref_result = test_impl(obj, window, min_periods, other, ddof)
assert_equal(jit_result, ref_result)
def _test_rolling_cov_with_no_other(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).cov(pairwise=False)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_cov_unsupported_types(self, obj):
def test_impl(obj, pairwise, ddof):
return obj.rolling(3, 3).cov(pairwise=pairwise, ddof=ddof)
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling.cov(). The object {}\n given: {}\n expected: {}'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 1, 1)
msg = msg_tmpl.format('pairwise', 'int64', 'bool')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, None, '1')
msg = msg_tmpl.format('ddof', 'unicode_type', 'int')
self.assertIn(msg, str(raises.exception))
def _test_rolling_kurt(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).kurt()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(4, len(obj) + 1):
for min_periods in range(window + 1):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
ref_result = test_impl(obj, window, min_periods)
jit_result = hpat_func(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_max(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).max()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
# the pandas implementation crashes if window == 0; the jitted version works correctly
for window in range(1, len(obj) + 2):
for min_periods in range(window + 1):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_mean(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).mean()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(len(obj) + 2):
for min_periods in range(window):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_median(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).median()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods in range(0, window + 1, 2):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_min(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).min()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
# the pandas implementation crashes if window == 0; the jitted version works correctly
for window in range(1, len(obj) + 2):
for min_periods in range(window + 1):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_quantile(self, obj):
def test_impl(obj, window, min_periods, quantile):
return obj.rolling(window, min_periods).quantile(quantile)
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
quantiles = [0, 0.25, 0.5, 0.75, 1]
for window in range(0, len(obj) + 3, 2):
for min_periods, q in product(range(0, window, 2), quantiles):
with self.subTest(obj=obj, window=window,
min_periods=min_periods, quantiles=q):
jit_result = hpat_func(obj, window, min_periods, q)
ref_result = test_impl(obj, window, min_periods, q)
assert_equal(jit_result, ref_result)
def _test_rolling_quantile_exception_unsupported_types(self, obj):
def test_impl(obj, quantile, interpolation):
return obj.rolling(3, 2).quantile(quantile, interpolation)
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling.quantile(). The object {}\n given: {}\n expected: {}'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, '0.5', 'linear')
msg = msg_tmpl.format('quantile', 'unicode_type', 'float')
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
hpat_func(obj, 0.5, None)
msg = msg_tmpl.format('interpolation', 'none', 'str')
self.assertIn(msg, str(raises.exception))
def _test_rolling_quantile_exception_unsupported_values(self, obj):
def test_impl(obj, quantile, interpolation):
return obj.rolling(3, 2).quantile(quantile, interpolation)
hpat_func = self.jit(test_impl)
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 2, 'linear')
self.assertIn('quantile value not in [0, 1]', str(raises.exception))
with self.assertRaises(ValueError) as raises:
hpat_func(obj, 0.5, 'lower')
self.assertIn('interpolation value not "linear"', str(raises.exception))
def _test_rolling_skew(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).skew()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(3, len(obj) + 1):
for min_periods in range(window + 1):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
ref_result = test_impl(obj, window, min_periods)
jit_result = hpat_func(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_std(self, obj):
test_impl = rolling_std_usecase
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods, ddof in product(range(0, window, 2), [0, 1]):
with self.subTest(obj=obj, window=window,
min_periods=min_periods, ddof=ddof):
jit_result = hpat_func(obj, window, min_periods, ddof)
ref_result = test_impl(obj, window, min_periods, ddof)
assert_equal(jit_result, ref_result)
def _test_rolling_std_exception_unsupported_ddof(self, obj):
test_impl = rolling_std_usecase
hpat_func = self.jit(test_impl)
window, min_periods, invalid_ddof = 3, 2, '1'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, window, min_periods, invalid_ddof)
msg = 'Method rolling.std(). The object ddof\n given: unicode_type\n expected: int'
self.assertIn(msg, str(raises.exception))
def _test_rolling_sum(self, obj):
def test_impl(obj, window, min_periods):
return obj.rolling(window, min_periods).sum()
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(len(obj) + 2):
for min_periods in range(window):
with self.subTest(obj=obj, window=window,
min_periods=min_periods):
jit_result = hpat_func(obj, window, min_periods)
ref_result = test_impl(obj, window, min_periods)
assert_equal(jit_result, ref_result)
def _test_rolling_var(self, obj):
test_impl = rolling_var_usecase
hpat_func = self.jit(test_impl)
assert_equal = self._get_assert_equal(obj)
for window in range(0, len(obj) + 3, 2):
for min_periods, ddof in product(range(0, window, 2), [0, 1]):
with self.subTest(obj=obj, window=window,
min_periods=min_periods, ddof=ddof):
jit_result = hpat_func(obj, window, min_periods, ddof)
ref_result = test_impl(obj, window, min_periods, ddof)
assert_equal(jit_result, ref_result)
def _test_rolling_var_exception_unsupported_ddof(self, obj):
test_impl = rolling_var_usecase
hpat_func = self.jit(test_impl)
window, min_periods, invalid_ddof = 3, 2, '1'
with self.assertRaises(TypingError) as raises:
hpat_func(obj, window, min_periods, invalid_ddof)
msg = 'Method rolling.var(). The object ddof\n given: unicode_type\n expected: int'
self.assertIn(msg, str(raises.exception))
@skip_sdc_jit('DataFrame.rolling.min() unsupported exceptions')
def test_df_rolling_unsupported_values(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_unsupported_values(df)
@skip_sdc_jit('DataFrame.rolling.min() unsupported exceptions')
def test_df_rolling_unsupported_types(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_unsupported_types(df)
@skip_sdc_jit('DataFrame.rolling.apply() unsupported')
def test_df_rolling_apply_mean(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_apply_mean(df)
@skip_sdc_jit('DataFrame.rolling.apply() unsupported exceptions')
def test_df_rolling_apply_unsupported_types(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_apply_unsupported_types(df)
@unittest.skip('DataFrame.rolling.apply() unsupported args')
def test_df_rolling_apply_args(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_apply_args(df)
@skip_sdc_jit('DataFrame.rolling.corr() unsupported')
def test_df_rolling_corr(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
for d in all_data:
other = pd.Series(d)
self._test_rolling_corr(df, other)
other_all_data = deepcopy(all_data) + [list(range(10))[::-1]]
other_all_data[1] = [-1., 1., 0., -0.1, 0.1, 0.]
other_length = min(len(d) for d in other_all_data)
other_data = {n: d[:other_length] for n, d in zip(string.ascii_uppercase, other_all_data)}
other = pd.DataFrame(other_data)
self._test_rolling_corr(df, other)
@skip_sdc_jit('DataFrame.rolling.corr() unsupported')
def test_df_rolling_corr_no_other(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_corr_with_no_other(df)
@skip_sdc_jit('DataFrame.rolling.corr() unsupported exceptions')
def test_df_rolling_corr_unsupported_types(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_corr_unsupported_types(df)
@skip_sdc_jit('DataFrame.rolling.corr() unsupported exceptions')
def test_df_rolling_corr_unsupported_values(self):
def test_impl(df, other, pairwise):
return df.rolling(3, 3).corr(other=other, pairwise=pairwise)
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling.corr(). The object pairwise\n expected: {}'
df = pd.DataFrame({'A': [1., -1., 0., 0.1, -0.1],
'B': [-1., 1., 0., -0.1, 0.1]})
for pairwise in [None, True]:
with self.assertRaises(ValueError) as raises:
hpat_func(df, None, pairwise)
self.assertIn(msg_tmpl.format('False'), str(raises.exception))
other = pd.DataFrame({'A': [-1., 1., 0., -0.1, 0.1],
'C': [1., -1., 0., 0.1, -0.1]})
with self.assertRaises(ValueError) as raises:
hpat_func(df, other, True)
self.assertIn(msg_tmpl.format('False, None'), str(raises.exception))
@skip_sdc_jit('DataFrame.rolling.count() unsupported')
def test_df_rolling_count(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_count(df)
@skip_sdc_jit('DataFrame.rolling.cov() unsupported')
def test_df_rolling_cov(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
for d in all_data:
other = pd.Series(d)
self._test_rolling_cov(df, other)
other_all_data = deepcopy(all_data) + [list(range(10))[::-1]]
other_all_data[1] = [-1., 1., 0., -0.1, 0.1]
other_length = min(len(d) for d in other_all_data)
other_data = {n: d[:other_length] for n, d in zip(string.ascii_uppercase, other_all_data)}
other = pd.DataFrame(other_data)
self._test_rolling_cov(df, other)
@skip_sdc_jit('DataFrame.rolling.cov() unsupported')
def test_df_rolling_cov_no_other(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_cov_with_no_other(df)
@skip_sdc_jit('DataFrame.rolling.cov() unsupported exceptions')
def test_df_rolling_cov_unsupported_types(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_cov_unsupported_types(df)
@skip_sdc_jit('DataFrame.rolling.cov() unsupported exceptions')
def test_df_rolling_cov_unsupported_values(self):
def test_impl(df, other, pairwise):
return df.rolling(3, 3).cov(other=other, pairwise=pairwise)
hpat_func = self.jit(test_impl)
msg_tmpl = 'Method rolling.cov(). The object pairwise\n expected: {}'
df = pd.DataFrame({'A': [1., -1., 0., 0.1, -0.1],
'B': [-1., 1., 0., -0.1, 0.1]})
for pairwise in [None, True]:
with self.assertRaises(ValueError) as raises:
hpat_func(df, None, pairwise)
self.assertIn(msg_tmpl.format('False'), str(raises.exception))
other = pd.DataFrame({'A': [-1., 1., 0., -0.1, 0.1],
'C': [1., -1., 0., 0.1, -0.1]})
with self.assertRaises(ValueError) as raises:
hpat_func(df, other, True)
self.assertIn(msg_tmpl.format('False, None'), str(raises.exception))
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
@unittest.expectedFailure
def test_df_rolling_cov_issue_floating_point_rounding(self):
"""
Cover issue of different float rounding in Python and SDC/Numba:
s = pd.Series([1., -1., 0., 0.1, -0.1])
s.rolling(2, 0).mean()
Python: SDC/Numba:
0 1.000000e+00 0 1.00
1 0.000000e+00 1 0.00
2 -5.000000e-01 2 -0.50
3 5.000000e-02 3 0.05
4 -1.387779e-17 4 0.00
dtype: float64 dtype: float64
BTW: cov uses mean inside itself
"""
def test_impl(df, window, min_periods, other, ddof):
return df.rolling(window, min_periods).cov(other, ddof=ddof)
hpat_func = self.jit(test_impl)
df = pd.DataFrame({'A': [1., -1., 0., 0.1, -0.1]})
other = pd.DataFrame({'A': [-1., 1., 0., -0.1, 0.1, 0.]})
jit_result = hpat_func(df, 2, 0, other, 1)
ref_result = test_impl(df, 2, 0, other, 1)
pd.testing.assert_frame_equal(jit_result, ref_result)
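# A standalone sketch (not part of the original suite) of the rounding effect
# described in the docstring above: pandas' streaming rolling-mean kernel can
# leave a tiny residual (order 1e-17) where a direct window-by-window mean is
# exactly 0.0, which is why exact frame comparison between implementations may fail.
import numpy as np
import pandas as pd

s = pd.Series([1., -1., 0., 0.1, -0.1])
streamed = s.rolling(2, min_periods=0).mean()
windowed = pd.Series([s.iloc[max(0, i - 1):i + 1].mean() for i in range(len(s))])
# The two agree only up to floating-point noise in the last elements.
np.testing.assert_allclose(streamed.to_numpy(), windowed.to_numpy(), atol=1e-12)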
@skip_sdc_jit('DataFrame.rolling.kurt() unsupported')
def test_df_rolling_kurt(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_kurt(df)
@skip_sdc_jit('DataFrame.rolling.max() unsupported')
def test_df_rolling_max(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_max(df)
@skip_sdc_jit('DataFrame.rolling.mean() unsupported')
def test_df_rolling_mean(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_mean(df)
@skip_sdc_jit('DataFrame.rolling.median() unsupported')
def test_df_rolling_median(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_median(df)
@skip_sdc_jit('DataFrame.rolling.min() unsupported')
def test_df_rolling_min(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_min(df)
@unittest.expectedFailure
@unittest.skipIf(platform.system() == 'Darwin', 'Segmentation fault on Mac')
@skip_sdc_jit('DataFrame.rolling.min() unsupported')
def test_df_rolling_min_exception_many_columns(self):
def test_impl(df):
return df.rolling(3).min()
hpat_func = self.jit(test_impl)
# more than 19 columns raise SystemError: CPUDispatcher() returned a result with an error set
all_data = test_global_input_data_float64 * 5
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
pd.testing.assert_frame_equal(hpat_func(df), test_impl(df))
@skip_sdc_jit('DataFrame.rolling.quantile() unsupported')
def test_df_rolling_quantile(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_quantile(df)
@skip_sdc_jit('DataFrame.rolling.quantile() unsupported exceptions')
def test_df_rolling_quantile_exception_unsupported_types(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_quantile_exception_unsupported_types(df)
@skip_sdc_jit('DataFrame.rolling.quantile() unsupported exceptions')
def test_df_rolling_quantile_exception_unsupported_values(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_quantile_exception_unsupported_values(df)
@skip_sdc_jit('DataFrame.rolling.skew() unsupported')
def test_df_rolling_skew(self):
all_data = test_global_input_data_float64
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_skew(df)
@skip_sdc_jit('DataFrame.rolling.std() unsupported')
def test_df_rolling_std(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_std(df)
@skip_sdc_jit('DataFrame.rolling.std() unsupported exceptions')
def test_df_rolling_std_exception_unsupported_ddof(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_std_exception_unsupported_ddof(df)
@skip_sdc_jit('DataFrame.rolling.sum() unsupported')
def test_df_rolling_sum(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_sum(df)
@skip_sdc_jit('DataFrame.rolling.var() unsupported')
def test_df_rolling_var(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_var(df)
@skip_sdc_jit('DataFrame.rolling.var() unsupported exceptions')
def test_df_rolling_var_exception_unsupported_ddof(self):
all_data = [[1., -1., 0., 0.1, -0.1], [-1., 1., 0., -0.1, 0.1]]
length = min(len(d) for d in all_data)
data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
df = pd.DataFrame(data)
self._test_rolling_var_exception_unsupported_ddof(df)
@skip_sdc_jit('Series.rolling.min() unsupported exceptions')
def test_series_rolling_unsupported_values(self):
series = pd.Series(test_global_input_data_float64[0])
self._test_rolling_unsupported_values(series)
@skip_sdc_jit('Series.rolling.min() unsupported exceptions')
def test_series_rolling_unsupported_types(self):
series = pd.Series(test_global_input_data_float64[0])
self._test_rolling_unsupported_types(series)
@skip_sdc_jit('Series.rolling.apply() unsupported Series index')
def test_series_rolling_apply_mean(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_apply_mean(series)
@skip_sdc_jit('Series.rolling.apply() unsupported exceptions')
def test_series_rolling_apply_unsupported_types(self):
series = pd.Series([1., -1., 0., 0.1, -0.1])
self._test_rolling_apply_unsupported_types(series)
@unittest.skip('Series.rolling.apply() unsupported args')
def test_series_rolling_apply_args(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_apply_args(series)
@skip_sdc_jit('Series.rolling.corr() unsupported Series index')
def test_series_rolling_corr(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[-1., 1., 0., -0.1, 0.1, 0.],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
for main_data, other_data in product(all_data, all_data):
series = pd.Series(main_data)
other = pd.Series(other_data)
self._test_rolling_corr(series, other)
@skip_sdc_jit('Series.rolling.corr() unsupported Series index')
def test_series_rolling_corr_diff_length(self):
def test_impl(series, window, other):
return series.rolling(window).corr(other)
hpat_func = self.jit(test_impl)
series = pd.Series([1., -1., 0., 0.1, -0.1])
other = pd.Series(gen_frand_array(40))
window = 5
jit_result = hpat_func(series, window, other)
ref_result = test_impl(series, window, other)
pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.corr() unsupported Series index')
def test_series_rolling_corr_with_no_other(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
for data in all_data:
series = pd.Series(data)
self._test_rolling_corr_with_no_other(series)
@skip_sdc_jit('Series.rolling.corr() unsupported exceptions')
def test_series_rolling_corr_unsupported_types(self):
series = pd.Series([1., -1., 0., 0.1, -0.1])
self._test_rolling_corr_unsupported_types(series)
@skip_sdc_jit('Series.rolling.corr() unsupported Series index')
@unittest.expectedFailure # https://jira.devtools.intel.com/browse/SAT-2377
def test_series_rolling_corr_index(self):
def test_impl(S1, S2):
return S1.rolling(window=3).corr(S2)
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index_values = np.arange(n)
np.random.shuffle(index_values)
S1 = pd.Series(np.arange(n), index=index_values, name='A')
np.random.shuffle(index_values)
S2 = pd.Series(2 * np.arange(n) - 5, index=index_values, name='B')
result = hpat_func(S1, S2)
result_ref = test_impl(S1, S2)
pd.testing.assert_series_equal(result, result_ref)
@skip_sdc_jit('Series.rolling.count() unsupported Series index')
def test_series_rolling_count(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_count(series)
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
def test_series_rolling_cov(self):
all_data = [
list(range(5)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
for main_data, other_data in product(all_data, all_data):
series = pd.Series(main_data)
other = pd.Series(other_data)
self._test_rolling_cov(series, other)
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
def test_series_rolling_cov_diff_length(self):
def test_impl(series, window, other):
return series.rolling(window).cov(other)
hpat_func = self.jit(test_impl)
series = pd.Series([1., -1., 0., 0.1, -0.1])
other = pd.Series(gen_frand_array(40))
window = 5
jit_result = hpat_func(series, window, other)
ref_result = test_impl(series, window, other)
pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
def test_series_rolling_cov_no_other(self):
all_data = [
list(range(5)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
for data in all_data:
series = pd.Series(data)
self._test_rolling_cov_with_no_other(series)
@skip_sdc_jit('Series.rolling.cov() unsupported Series index')
@unittest.expectedFailure
def test_series_rolling_cov_issue_floating_point_rounding(self):
"""Cover issue of different float rounding in Python and SDC/Numba"""
def test_impl(series, window, min_periods, other, ddof):
return series.rolling(window, min_periods).cov(other, ddof=ddof)
hpat_func = self.jit(test_impl)
series = pd.Series(list(range(10)))
other = pd.Series([1., -1., 0., 0.1, -0.1])
jit_result = hpat_func(series, 6, 0, other, 1)
ref_result = test_impl(series, 6, 0, other, 1)
pd.testing.assert_series_equal(jit_result, ref_result)
@skip_sdc_jit('Series.rolling.cov() unsupported exceptions')
def test_series_rolling_cov_unsupported_types(self):
series = pd.Series([1., -1., 0., 0.1, -0.1])
self._test_rolling_cov_unsupported_types(series)
@skip_sdc_jit('Series.rolling.kurt() unsupported Series index')
def test_series_rolling_kurt(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_kurt(series)
@skip_sdc_jit('Series.rolling.max() unsupported Series index')
def test_series_rolling_max(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_max(series)
@skip_sdc_jit('Series.rolling.mean() unsupported Series index')
def test_series_rolling_mean(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_mean(series)
@skip_sdc_jit('Series.rolling.median() unsupported Series index')
def test_series_rolling_median(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_median(series)
@skip_sdc_jit('Series.rolling.min() unsupported Series index')
def test_series_rolling_min(self):
all_data = test_global_input_data_float64
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_min(series)
@skip_sdc_jit('Series.rolling.quantile() unsupported Series index')
def test_series_rolling_quantile(self):
all_data = [
list(range(10)), [1., -1., 0., 0.1, -0.1],
[1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
[np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
]
indices = [list(range(len(data)))[::-1] for data in all_data]
for data, index in zip(all_data, indices):
series = pd.Series(data, index, name='A')
self._test_rolling_quantile(series)
@skip_sdc_jit('Series.rolling.quantile() unsupported exceptions')
def test_series_rolling_quantile_exception_unsupported_types(self):
series = pd.Series([1., -1., 0., 0.1, -0.1])
self._test_rolling_quantile_exception_unsupported_types(series)
"""
@author: ludvigolsen
"""
import numpy as np
import pandas as pd
def convert_to_df(data):
"""
Checks the type of data
If it is not a pd.DataFrame it
attempts to convert to pd.DataFrame
"""
data_type = 'pd.DataFrame'
if isinstance(data, pd.DataFrame):
return data, data_type
elif isinstance(data, pd.Series):
data_type = 'pd.Series'
data = pd.DataFrame({'x': data})
elif type(data).__module__ == np.__name__:
try:
data = pd.DataFrame(data)
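# A minimal usage sketch (not from the original module). The DataFrame branch
# above returns the object unchanged together with the label 'pd.DataFrame';
# the Series/ndarray branches are assumed to finish the same way (their tail
# is not shown here).
if __name__ == '__main__':
    frame = pd.DataFrame({'x': [1.0, 2.0, 3.0]})
    same_frame, kind = convert_to_df(frame)
    print(kind)  # 'pd.DataFrame'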
from numpy import linalg, zeros, ones, hstack, asarray, vstack, array, mean, std
import itertools
import matplotlib.pyplot as plt
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.dates as mdates
from sklearn.metrics import mean_squared_error
from math import sqrt
import warnings
import copy
import time
warnings.filterwarnings("ignore")
import seaborn as sns
sns.set(style="whitegrid")
from PVPolyfit import preprocessing as preprocess
from PVPolyfit import utilities
from PVPolyfit import clustering as cluster
from PVPolyfit import kernel
def pvpolyfit(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, highest_num_clusters, highest_degree, kernel_type, Y_high_filter, min_count_per_day, include_preprocess = False, plot_graph = True, graph_type = 'regression', print_info = False):
#print("h ERE")
if len(train_df) == 0 or len(test_df) == 0:
raise Exception("Either one or both DataFrames are empty.")
pvpoly = PVPolyfit(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, print_info)
#print("H eRE")
pvpoly.prepare(Y_high_filter, min_count_per_day, include_preprocess)
#print("He RE")
rmse_list = []
std_rmse_list = []
pvpoly_objects = []
combined_labels = []
for i in range(1, highest_num_clusters+1):
pvpoly_iter = copy.deepcopy(pvpoly)
try:
labels = pvpoly_iter.run(num_clusters = i, num_iterations = 1, degrees = list(range(1,highest_degree+1)), kernel_type = kernel_type)
all_best_dfs, ultimate_days, avg_rmse, std_rmse = pvpoly_iter.evaluate(print_info = print_info)
rmse_list.append(avg_rmse)
std_rmse_list.append(std_rmse)
pvpoly_objects.append(pvpoly_iter)
combined_labels.append(labels)
except Exception as e:
if print_info:
print(e)
break
if len(rmse_list) == 0:
raise Exception("No Output was produced.")
min_idx = np.argmin(rmse_list)
if print_info:
print(min_idx)
print("{} cluster(s) were used.".format(range(1,highest_num_clusters+1)[min_idx]))
days_rmses, model_output, meases, df = pvpoly_objects[min_idx].plot(graph_type = graph_type, print_info = print_info, plot_graph = plot_graph)
return model_output, meases, days_rmses, range(1,highest_num_clusters+1)[min_idx], df, combined_labels[min_idx]
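# The loop above fits one candidate model per cluster count and keeps the one
# whose evaluation gives the smallest average RMSE (np.argmin over rmse_list).
# A standalone sketch of that selection pattern with hypothetical scores:
def _select_best_by_rmse(candidates, rmses):
    """Return (best_candidate, best_rmse) for parallel lists of candidates and scores."""
    best_idx = int(np.argmin(rmses))
    return candidates[best_idx], rmses[best_idx]

# e.g. _select_best_by_rmse([1, 2, 3], [4.2, 3.1, 3.6]) -> (2, 3.1)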
def _pvpolyfit_inputCluster(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, num_clusters, highest_degree, kernel_type, Y_high_filter, min_count_per_day, include_preprocess = False, plot_graph = True, graph_type = 'regression', print_info = False):
#print('inside')
if len(train_df) == 0 or len(test_df) == 0:
raise Exception("Either one or both DataFrames are empty.")
pvpoly = PVPolyfit(train_df, test_df, Y_tag, xs, I_tag, ghi_tag, cs_tag, print_info)
pvpoly.prepare(Y_high_filter, min_count_per_day, include_preprocess)
try:
pvpoly.run(num_clusters = num_clusters, num_iterations = 1, degrees = list(range(1,highest_degree+1)), kernel_type = kernel_type)
all_best_dfs, ultimate_days, avg_rmse, std_rmse = pvpoly.evaluate(print_info = print_info)
except Exception as e:
raise Exception("Error has occurred: ", e)
if len(str(avg_rmse)) == 0:
raise Exception("No Output was produced. Go here for more information: ")
days_rmses, model_output, meases, df = pvpoly.plot(graph_type = graph_type, print_info = print_info, plot_graph = plot_graph)
return model_output, meases, days_rmses, num_clusters, df
def break_days(df, filter_bool, min_count_per_day = 8, frequency = 'days', print_info = False):
index_list = []
day_hour_list = []
prev = 0
for index, j in enumerate(df.index):
if str(type(j)) != "<class 'str'>":
print(type(j))
print(j)
print(df.loc[j])
j = j.strftime('%m/%d/%Y %H:%M:%S %p')
if frequency == 'days':
curr = int(datetime.strptime(j, '%m/%d/%Y %H:%M:%S %p').strftime('%d'))
frq = datetime.strptime(j, '%m/%d/%Y %H:%M:%S %p').strftime('%m/%d/%Y')
elif frequency == 'hours':
curr = int(datetime.strptime(j, '%m/%d/%Y %H:%M:%S %p').strftime('%H'))
frq = datetime.strptime(j, '%m/%d/%Y %H:%M:%S %p').strftime('%m/%d/%Y %H')
if curr != prev:
index_list.append(index)
day_hour_list.append(frq)
prev = curr
last_index = index
cut_results = []
# Break df into days
for k in range(len(index_list)):
if k == (len(index_list)-1):
# append last day
cut_results.append(df[index_list[k]:-1])
else:
cut_results.append(df[index_list[k]:index_list[k+1]])
cut_results[-1] = pd.concat([cut_results[-1], df.iloc[[-1]]])
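# break_days above slices the frame at every index position where the day (or
# hour) changes. For a true DatetimeIndex the same per-day split can be written
# with groupby; a short sketch for comparison (not used by PVPolyfit):
def split_by_day(frame):
    """Return a list of single-day DataFrames in chronological order."""
    return [group for _, group in frame.groupby(frame.index.normalize())]

# e.g. split_by_day(pd.DataFrame({'B': np.arange(48)},
#                                index=pd.date_range('2018-01-01', periods=48, freq='H')))
# returns two 24-row frames.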
from PySDDP.dessem.script.templates.dadger import DadgerTemplate
import pandas as pd
import os
from typing import IO
COMENTARIO = '&'
class Dadger(DadgerTemplate):
"""
Class containing all the elements common to any version of the Dessem Entdados file.
This class is intended to provide duck typing for the Dessem class and to add a level of specialization
inside the factory. It also passes along the responsibility for implementing the read and write
methods.
"""
def __init__(self):
super().__init__()
self.comentarios = list()
self.tm = dict()
self.sist = dict()
self.ree = dict()
self.uh = dict()
self.tviag = dict()
self.ut = dict()
self.usie = dict()
self.dp = dict()
self.de = dict()
self.cd = dict()
self.ri = dict()
self.ia = dict()
self.rd = dict()
self.rivar = dict()
self.it = dict()
self.gp = dict()
self.ni = dict()
self.ve = dict()
self.ci_ce = dict()
self.re = dict()
self.lu = dict()
self.fh = dict()
self.ft = dict()
self.fi = dict()
self.fe = dict()
self.fr = dict()
self.fc = dict()
self.ac = dict()
self.da = dict()
self.fp = dict()
self.ez = dict()
self.ag = dict()
self.mh = dict()
self.mt = dict()
self.tx = dict()
self.pq = dict()
self.secr = dict()
self.cr = dict()
self.r11 = dict()
self.vr = dict()
self.pd = dict()
self.vm = dict()
self.df = dict()
self.me = dict()
self.meta_cjsist = dict()
self.meta_sist = dict()
self.meta_usit = dict()
self.sh = dict()
self.tf = dict()
self.rs = dict()
self.sp = dict()
self.ps = dict()
self.pp = dict()
def ler(self, file_name: str) -> None:
self.entdados = list()
# lists for the TM dictionary
self.tm['mne'] = list()
self.tm['dd'] = list()
self.tm['hr'] = list()
self.tm['mh'] = list()
self.tm['durac'] = list()
self.tm['rede'] = list()
self.tm['patamar'] = list()
# lists for the SIST dictionary
self.sist['mne'] = list()
self.sist['num'] = list()
self.sist['mne_iden'] = list()
self.sist['flag'] = list()
self.sist['nome'] = list()
# lists for the REE dictionary
self.ree['mne'] = list()
self.ree['num_ree'] = list()
self.ree['num_sub'] = list()
self.ree['nome'] = list()
# lists for the UH dictionary
self.uh['mne'] = list()
self.uh['ind'] = list()
self.uh['nome'] = list()
self.uh['ss'] = list()
self.uh['vinic'] = list()
self.uh['evap'] = list()
self.uh['di'] = list()
self.uh['hi'] = list()
self.uh['m'] = list()
self.uh['vmor'] = list()
self.uh['prod'] = list()
self.uh['rest'] = list()
# lists for the TVIAG dictionary
self.tviag['mne'] = list()
self.tviag['mont'] = list()
self.tviag['jus'] = list()
self.tviag['tp'] = list()
self.tviag['hr'] = list()
self.tviag['tpTviag'] = list()
# lists for the UT dictionary
self.ut['mne'] = list()
self.ut['num'] = list()
self.ut['nome'] = list()
self.ut['ss'] = list()
self.ut['flag'] = list()
self.ut['di'] = list()
self.ut['hi'] = list()
self.ut['mi'] = list()
self.ut['df'] = list()
self.ut['hf'] = list()
self.ut['mf'] = list()
self.ut['rest'] = list()
self.ut['gmin'] = list()
self.ut['gmax'] = list()
self.ut['g_anterior'] = list()
# lists for the USIE dictionary
self.usie['mne'] = list()
self.usie['num'] = list()
self.usie['ss'] = list()
self.usie['nome'] = list()
self.usie['mont'] = list()
self.usie['jus'] = list()
self.usie['qmin'] = list()
self.usie['qmax'] = list()
self.usie['taxa_consumo'] = list()
# lists for the DP dictionary
self.dp['mne'] = list()
self.dp['ss'] = list()
self.dp['di'] = list()
self.dp['hi'] = list()
self.dp['mi'] = list()
self.dp['df'] = list()
self.dp['hf'] = list()
self.dp['mf'] = list()
self.dp['demanda'] = list()
# lists for the DE dictionary
self.de['mne'] = list()
self.de['nde'] = list()
self.de['di'] = list()
self.de['hi'] = list()
self.de['mi'] = list()
self.de['df'] = list()
self.de['hf'] = list()
self.de['mf'] = list()
self.de['demanda'] = list()
self.de['justific'] = list()
# lists for the CD dictionary
self.cd['mne'] = list()
self.cd['is'] = list()
self.cd['cd'] = list()
self.cd['di'] = list()
self.cd['hi'] = list()
self.cd['mi'] = list()
self.cd['df'] = list()
self.cd['hf'] = list()
self.cd['mf'] = list()
self.cd['custo'] = list()
self.cd['limsup'] = list()
# lists for the RI dictionary
self.ri['mne'] = list()
self.ri['di'] = list()
self.ri['hi'] = list()
self.ri['mi'] = list()
self.ri['df'] = list()
self.ri['hf'] = list()
self.ri['mf'] = list()
self.ri['gh50min'] = list()
self.ri['gh50max'] = list()
self.ri['gh60min'] = list()
self.ri['gh60max'] = list()
self.ri['ande'] = list()
# lists for the IA dictionary
self.ia['mne'] = list()
self.ia['ss1'] = list()
self.ia['ss2'] = list()
self.ia['di'] = list()
self.ia['hi'] = list()
self.ia['mi'] = list()
self.ia['df'] = list()
self.ia['hf'] = list()
self.ia['mf'] = list()
self.ia['ss1_ss2'] = list()
self.ia['ss2_ss1'] = list()
# lists for the RD dictionary
self.rd['mne'] = list()
self.rd['flag_fol'] = list()
self.rd['ncirc'] = list()
self.rd['dbar'] = list()
self.rd['lim'] = list()
self.rd['dlin'] = list()
self.rd['perd'] = list()
self.rd['formato'] = list()
# lists for the RIVAR dictionary
self.rivar['mne'] = list()
self.rivar['num'] = list()
self.rivar['ss'] = list()
self.rivar['cod'] = list()
self.rivar['penalidade'] = list()
# lists for the IT dictionary
self.it['mne'] = list()
self.it['num'] = list()
self.it['coef'] = list()
# lists for the GP dictionary
self.gp['mne'] = list()
self.gp['tol_conv'] = list()
self.gp['tol_prob'] = list()
# lists for the NI dictionary
self.ni['mne'] = list()
self.ni['flag'] = list()
self.ni['nmax'] = list()
# lists for the VE dictionary
self.ve['mne'] = list()
self.ve['ind'] = list()
self.ve['di'] = list()
self.ve['hi'] = list()
self.ve['mi'] = list()
self.ve['df'] = list()
self.ve['hf'] = list()
self.ve['mf'] = list()
self.ve['vol'] = list()
# lists for the CI/CE dictionary
self.ci_ce['mne'] = list()
self.ci_ce['num'] = list()
self.ci_ce['nome'] = list()
self.ci_ce['ss_busf'] = list()
self.ci_ce['flag'] = list()
self.ci_ce['di'] = list()
self.ci_ce['hi'] = list()
self.ci_ce['mi'] = list()
self.ci_ce['df'] = list()
self.ci_ce['hf'] = list()
self.ci_ce['mf'] = list()
self.ci_ce['unid'] = list()
self.ci_ce['linf'] = list()
self.ci_ce['lsup'] = list()
self.ci_ce['custo'] = list()
self.ci_ce['energia'] = list()
# lists for the RE dictionary
self.re['mne'] = list()
self.re['ind'] = list()
self.re['di'] = list()
self.re['hi'] = list()
self.re['mi'] = list()
self.re['df'] = list()
self.re['hf'] = list()
self.re['mf'] = list()
# lists for the LU dictionary
self.lu['mne'] = list()
self.lu['ind'] = list()
self.lu['di'] = list()
self.lu['hi'] = list()
self.lu['mi'] = list()
self.lu['df'] = list()
self.lu['hf'] = list()
self.lu['mf'] = list()
self.lu['linf'] = list()
self.lu['lsup'] = list()
# lists for the FH dictionary
self.fh['mne'] = list()
self.fh['ind'] = list()
self.fh['di'] = list()
self.fh['hi'] = list()
self.fh['mi'] = list()
self.fh['df'] = list()
self.fh['hf'] = list()
self.fh['mf'] = list()
self.fh['ush'] = list()
self.fh['unh'] = list()
self.fh['fator'] = list()
# lists for the FT dictionary
self.ft['mne'] = list()
self.ft['ind'] = list()
self.ft['di'] = list()
self.ft['hi'] = list()
self.ft['mi'] = list()
self.ft['df'] = list()
self.ft['hf'] = list()
self.ft['mf'] = list()
self.ft['ust'] = list()
self.ft['fator'] = list()
# lists for the FI dictionary
self.fi['mne'] = list()
self.fi['ind'] = list()
self.fi['di'] = list()
self.fi['hi'] = list()
self.fi['mi'] = list()
self.fi['df'] = list()
self.fi['hf'] = list()
self.fi['mf'] = list()
self.fi['ss1'] = list()
self.fi['ss2'] = list()
self.fi['fator'] = list()
# lists for the FE dictionary
self.fe['mne'] = list()
self.fe['ind'] = list()
self.fe['di'] = list()
self.fe['hi'] = list()
self.fe['mi'] = list()
self.fe['df'] = list()
self.fe['hf'] = list()
self.fe['mf'] = list()
self.fe['num_contrato'] = list()
self.fe['fator'] = list()
# lists for the FR dictionary
self.fr['mne'] = list()
self.fr['ind'] = list()
self.fr['di'] = list()
self.fr['hi'] = list()
self.fr['mi'] = list()
self.fr['df'] = list()
self.fr['hf'] = list()
self.fr['mf'] = list()
self.fr['useol'] = list()
self.fr['fator'] = list()
# lists for the FC dictionary
self.fc['mne'] = list()
self.fc['ind'] = list()
self.fc['di'] = list()
self.fc['hi'] = list()
self.fc['mi'] = list()
self.fc['df'] = list()
self.fc['hf'] = list()
self.fc['mf'] = list()
self.fc['demanda'] = list()
self.fc['fator'] = list()
# lists for the AC dictionary
self.ac['mne'] = list()
self.ac['usi'] = list()
self.ac['mneumonico'] = list()
self.ac['ind'] = list()
self.ac['valor'] = list()
# lists for the DA dictionary
self.da['mne'] = list()
self.da['ind'] = list()
self.da['di'] = list()
self.da['hi'] = list()
self.da['mi'] = list()
self.da['df'] = list()
self.da['hf'] = list()
self.da['mf'] = list()
self.da['taxa'] = list()
self.da['obs'] = list()
# lists for the FP dictionary
self.fp['mne'] = list()
self.fp['usi'] = list()
self.fp['f'] = list()
self.fp['nptQ'] = list()
self.fp['nptV'] = list()
self.fp['concavidade'] = list()
self.fp['min_quadraticos'] = list()
self.fp['deltaV'] = list()
self.fp['tr'] = list()
# lists for the EZ dictionary
self.ez['mne'] = list()
self.ez['usi'] = list()
self.ez['perc_vol'] = list()
# lists for the AG dictionary
self.ag['mne'] = list()
self.ag['num_estagios'] = list()
# lists for the MH dictionary
self.mh['mne'] = list()
self.mh['num'] = list()
self.mh['gr'] = list()
self.mh['id'] = list()
self.mh['di'] = list()
self.mh['hi'] = list()
self.mh['mi'] = list()
self.mh['df'] = list()
self.mh['hf'] = list()
self.mh['mf'] = list()
self.mh['f'] = list()
# lists for the MT dictionary
self.mt['mne'] = list()
self.mt['ute'] = list()
self.mt['ug'] = list()
self.mt['di'] = list()
self.mt['hi'] = list()
self.mt['mi'] = list()
self.mt['df'] = list()
self.mt['hf'] = list()
self.mt['mf'] = list()
self.mt['f'] = list()
# lists for the TX dictionary
self.tx['mne'] = list()
self.tx['taxa_fcf'] = list()
# lists for the PQ dictionary
self.pq['mne'] = list()
self.pq['ind'] = list()
self.pq['nome'] = list()
self.pq['ss/b'] = list()
self.pq['di'] = list()
self.pq['hi'] = list()
self.pq['mi'] = list()
self.pq['df'] = list()
self.pq['hf'] = list()
self.pq['mf'] = list()
self.pq['geracao'] = list()
# lists for the SECR dictionary
self.secr['mne'] = list()
self.secr['num'] = list()
self.secr['nome'] = list()
self.secr['usi_1'] = list()
self.secr['fator_1'] = list()
self.secr['usi_2'] = list()
self.secr['fator_2'] = list()
self.secr['usi_3'] = list()
self.secr['fator_3'] = list()
self.secr['usi_4'] = list()
self.secr['fator_4'] = list()
self.secr['usi_5'] = list()
self.secr['fator_5'] = list()
# lists for the CR dictionary
self.cr['mne'] = list()
self.cr['num'] = list()
self.cr['nome'] = list()
self.cr['gr'] = list()
self.cr['A0'] = list()
self.cr['A1'] = list()
self.cr['A2'] = list()
self.cr['A3'] = list()
self.cr['A4'] = list()
self.cr['A5'] = list()
self.cr['A6'] = list()
# lists for the R11 dictionary
self.r11['mne'] = list()
self.r11['di'] = list()
self.r11['hi'] = list()
self.r11['mi'] = list()
self.r11['df'] = list()
self.r11['hf'] = list()
self.r11['mf'] = list()
self.r11['cotaIni'] = list()
self.r11['varhora'] = list()
self.r11['vardia'] = list()
self.r11['coef'] = list()
# lists for the VR dictionary
self.vr['mne'] = list()
self.vr['dia'] = list()
self.vr['mneumo_verao'] = list()
# lists for the PD dictionary
self.pd['mne'] = list()
self.pd['tol_perc'] = list()
self.pd['tol_MW'] = list()
# lists for the VM dictionary
self.vm['mne'] = list()
self.vm['ind'] = list()
self.vm['di'] = list()
self.vm['hi'] = list()
self.vm['mi'] = list()
self.vm['df'] = list()
self.vm['hf'] = list()
self.vm['mf'] = list()
self.vm['taxa_enchimento'] = list()
# lists for the DF dictionary
self.df['mne'] = list()
self.df['ind'] = list()
self.df['di'] = list()
self.df['hi'] = list()
self.df['mi'] = list()
self.df['df'] = list()
self.df['hf'] = list()
self.df['mf'] = list()
self.df['taxa_descarga'] = list()
# lists for the ME dictionary
self.me['mne'] = list()
self.me['ind'] = list()
self.me['di'] = list()
self.me['hi'] = list()
self.me['mi'] = list()
self.me['df'] = list()
self.me['hf'] = list()
self.me['mf'] = list()
self.me['fator'] = list()
# lists for the META CJSIST dictionary
self.meta_cjsist['mneumo'] = list()
self.meta_cjsist['ind'] = list()
self.meta_cjsist['nome'] = list()
# lists for the META SIST dictionary
self.meta_sist['mne'] = list()
self.meta_sist['ind'] = list()
self.meta_sist['tp'] = list()
self.meta_sist['num'] = list()
self.meta_sist['meta'] = list()
self.meta_sist['tol_MW'] = list()
self.meta_sist['tol_perc'] = list()
# lists for the META USIT dictionary
self.meta_usit['mne'] = list()
self.meta_usit['ind'] = list()
self.meta_usit['tp'] = list()
self.meta_usit['num'] = list()
self.meta_usit['meta'] = list()
self.meta_usit['tol_MW'] = list()
self.meta_usit['tol_perc'] = list()
# lists for the SH dictionary
self.sh['mne'] = list()
self.sh['flag_simul'] = list()
self.sh['flag_pl'] = list()
self.sh['num_min'] = list()
self.sh['num_max'] = list()
self.sh['flag_quebra'] = list()
self.sh['ind_1'] = list()
self.sh['ind_2'] = list()
self.sh['ind_3'] = list()
self.sh['ind_4'] = list()
self.sh['ind_5'] = list()
# lists for the TF dictionary
self.tf['mne'] = list()
self.tf['custo'] = list()
# lists for the RS dictionary
self.rs['mne'] = list()
self.rs['cod'] = list()
self.rs['ind'] = list()
self.rs['subs'] = list()
self.rs['tp'] = list()
self.rs['comentario'] = list()
# lists for the SP dictionary
self.sp['mne'] = list()
self.sp['flag'] = list()
# lists for the PS dictionary
self.ps['mne'] = list()
self.ps['flag'] = list()
# lists for the PP dictionary
self.pp['mne'] = list()
self.pp['flag'] = list()
self.pp['iteracoes'] = list()
self.pp['num'] = list()
self.pp['tp'] = list()
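# The record fields above are accumulated as plain Python lists; they are only turned into
# pandas DataFrames once the whole file has been read (see the StopIteration handler below).
# Each input line is dispatched on the mnemonic found in its first columns, and the fields
# are extracted with fixed-width slices of self.linha.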
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
continua = True
while continua:
self.next_line(f)
linha = self.linha
if linha[0] == COMENTARIO:
self.comentarios.append(linha)
self.entdados.append(linha)
continue
mne = linha[:6].strip().lower()
mne_sigla = linha[:3].strip().lower()
mneumo = linha[:13].strip().lower()
self.entdados.append(linha[:6])
# Read the data according to the corresponding mnemonic
if mne_sigla == 'tm':
self.tm['mne'].append(self.linha[:2])
self.tm['dd'].append(self.linha[4:6])
self.tm['hr'].append(self.linha[9:11])
self.tm['mh'].append(self.linha[14:15])
self.tm['durac'].append(self.linha[19:24])
self.tm['rede'].append(self.linha[29:30])
self.tm['patamar'].append(self.linha[33:39])
continue
if mne == 'sist':
self.sist['mne'].append(self.linha[:6])
self.sist['num'].append(self.linha[7:9])
self.sist['mne_iden'].append(self.linha[10:12])
self.sist['flag'].append(self.linha[13:15])
self.sist['nome'].append(self.linha[16:26])
continue
if mne == 'ree':
self.ree['mne'].append(self.linha[:3])
self.ree['num_ree'].append(self.linha[6:8])
self.ree['num_sub'].append(self.linha[9:11])
self.ree['nome'].append(self.linha[12:22])
continue
if mne_sigla == 'uh':
self.uh['mne'].append(self.linha[:2])
self.uh['ind'].append(self.linha[4:7])
self.uh['nome'].append(self.linha[9:21])
self.uh['ss'].append(self.linha[24:26])
self.uh['vinic'].append(self.linha[29:39])
self.uh['evap'].append(self.linha[39:40])
self.uh['di'].append(self.linha[41:43])
self.uh['hi'].append(self.linha[44:46])
self.uh['m'].append(self.linha[47:48])
self.uh['vmor'].append(self.linha[49:59])
self.uh['prod'].append(self.linha[64:65])
self.uh['rest'].append(self.linha[69:70])
continue
if mne == 'tviag':
self.tviag['mne'].append(self.linha[:6])
self.tviag['mont'].append(self.linha[6:9])
self.tviag['jus'].append(self.linha[10:13])
self.tviag['tp'].append(self.linha[14:15])
self.tviag['hr'].append(self.linha[19:22])
self.tviag['tpTviag'].append(self.linha[24:25])
continue
if mne_sigla == 'ut':
self.ut['mne'].append(self.linha[:2])
self.ut['num'].append(self.linha[4:7])
self.ut['nome'].append(self.linha[9:21])
self.ut['ss'].append(self.linha[22:24])
self.ut['flag'].append(self.linha[25:26])
self.ut['di'].append(self.linha[27:29])
self.ut['hi'].append(self.linha[30:32])
self.ut['mi'].append(self.linha[33:34])
self.ut['df'].append(self.linha[35:37])
self.ut['hf'].append(self.linha[38:40])
self.ut['mf'].append(self.linha[41:42])
self.ut['rest'].append(self.linha[46:47])
self.ut['gmin'].append(self.linha[47:57])
self.ut['gmax'].append(self.linha[57:67])
self.ut['g_anterior'].append(self.linha[67:77])
continue
if mne == 'usie':
self.usie['mne'].append(self.linha[:4])
self.usie['num'].append(self.linha[5:8])
self.usie['ss'].append(self.linha[9:11])
self.usie['nome'].append(self.linha[14:26])
self.usie['mont'].append(self.linha[29:32])
self.usie['jus'].append(self.linha[34:37])
self.usie['qmin'].append(self.linha[39:49])
self.usie['qmax'].append(self.linha[49:59])
self.usie['taxa_consumo'].append(self.linha[59:69])
continue
if mne_sigla == 'dp':
self.dp['mne'].append(self.linha[:2])
self.dp['ss'].append(self.linha[4:6])
self.dp['di'].append(self.linha[8:10])
self.dp['hi'].append(self.linha[11:13])
self.dp['mi'].append(self.linha[14:15])
self.dp['df'].append(self.linha[16:18])
self.dp['hf'].append(self.linha[19:21])
self.dp['mf'].append(self.linha[22:23])
self.dp['demanda'].append(self.linha[24:34])
continue
if mne_sigla == 'de':
self.de['mne'].append(self.linha[:2])
self.de['nde'].append(self.linha[4:7])
self.de['di'].append(self.linha[8:10])
self.de['hi'].append(self.linha[11:13])
self.de['mi'].append(self.linha[14:15])
self.de['df'].append(self.linha[16:18])
self.de['hf'].append(self.linha[19:21])
self.de['mf'].append(self.linha[22:23])
self.de['demanda'].append(self.linha[24:34])
self.de['justific'].append(self.linha[35:45])
continue
if mne_sigla == 'cd':
self.cd['mne'].append(self.linha[:2])
self.cd['is'].append(self.linha[3:5])
self.cd['cd'].append(self.linha[6:8])
self.cd['di'].append(self.linha[9:11])
self.cd['hi'].append(self.linha[12:14])
self.cd['mi'].append(self.linha[15:16])
self.cd['df'].append(self.linha[17:19])
self.cd['hf'].append(self.linha[20:22])
self.cd['mf'].append(self.linha[23:24])
self.cd['custo'].append(self.linha[25:35])
self.cd['limsup'].append(self.linha[35:45])
continue
if mne_sigla == 'ri':
self.ri['mne'].append(self.linha[:2])
self.ri['di'].append(self.linha[8:10])
self.ri['hi'].append(self.linha[11:13])
self.ri['mi'].append(self.linha[14:15])
self.ri['df'].append(self.linha[16:18])
self.ri['hf'].append(self.linha[19:21])
self.ri['mf'].append(self.linha[22:23])
self.ri['gh50min'].append(self.linha[26:36])
self.ri['gh50max'].append(self.linha[36:46])
self.ri['gh60min'].append(self.linha[46:56])
self.ri['gh60max'].append(self.linha[56:66])
self.ri['ande'].append(self.linha[66:76])
continue
if mne_sigla == 'ia':
self.ia['mne'].append(self.linha[:2])
self.ia['ss1'].append(self.linha[4:6])
self.ia['ss2'].append(self.linha[9:11])
self.ia['di'].append(self.linha[13:15])
self.ia['hi'].append(self.linha[16:18])
self.ia['mi'].append(self.linha[19:20])
self.ia['df'].append(self.linha[21:23])
self.ia['hf'].append(self.linha[24:26])
self.ia['mf'].append(self.linha[27:28])
self.ia['ss1_ss2'].append(self.linha[29:39])
self.ia['ss2_ss1'].append(self.linha[39:49])
continue
if mne_sigla == 'rd':
self.rd['mne'].append(self.linha[:2])
self.rd['flag_fol'].append(self.linha[4:5])
self.rd['ncirc'].append(self.linha[8:12])
self.rd['dbar'].append(self.linha[14:15])
self.rd['lim'].append(self.linha[16:17])
self.rd['dlin'].append(self.linha[18:19])
self.rd['perd'].append(self.linha[20:21])
self.rd['formato'].append(self.linha[22:23])
continue
if mne == 'rivar':
self.rivar['mne'].append(self.linha[:5])
self.rivar['num'].append(self.linha[7:10])
self.rivar['ss'].append(self.linha[11:14])
self.rivar['cod'].append(self.linha[15:17])
self.rivar['penalidade'].append(self.linha[19:29])
continue
if mne_sigla == 'it':
self.it['mne'].append(self.linha[:2])
self.it['num'].append(self.linha[4:6])
self.it['coef'].append(self.linha[9:84])
continue
if mne_sigla == 'gp':
self.gp['mne'].append(self.linha[:2])
self.gp['tol_conv'].append(self.linha[4:14])
self.gp['tol_prob'].append(self.linha[15:25])
continue
if mne_sigla == 'ni':
self.ni['mne'].append(self.linha[:2])
self.ni['flag'].append(self.linha[4:5])
self.ni['nmax'].append(self.linha[9:12])
continue
if mne_sigla == 've':
self.ve['mne'].append(self.linha[:2])
self.ve['ind'].append(self.linha[4:7])
self.ve['di'].append(self.linha[8:10])
self.ve['hi'].append(self.linha[11:13])
self.ve['mi'].append(self.linha[14:15])
self.ve['df'].append(self.linha[16:18])
self.ve['hf'].append(self.linha[19:21])
self.ve['mf'].append(self.linha[22:23])
self.ve['vol'].append(self.linha[24:34])
continue
if mne_sigla == 'ci' or mne_sigla == 'ce':
self.ci_ce['mne'].append(self.linha[:2])
self.ci_ce['num'].append(self.linha[3:6])
self.ci_ce['nome'].append(self.linha[7:17])
self.ci_ce['ss_busf'].append(self.linha[18:23])
self.ci_ce['flag'].append(self.linha[23:24])
self.ci_ce['di'].append(self.linha[25:27])
self.ci_ce['hi'].append(self.linha[28:30])
self.ci_ce['mi'].append(self.linha[31:32])
self.ci_ce['df'].append(self.linha[33:35])
self.ci_ce['hf'].append(self.linha[36:38])
self.ci_ce['mf'].append(self.linha[39:40])
self.ci_ce['unid'].append(self.linha[41:42])
self.ci_ce['linf'].append(self.linha[43:53])
self.ci_ce['lsup'].append(self.linha[53:63])
self.ci_ce['custo'].append(self.linha[63:73])
self.ci_ce['energia'].append(self.linha[73:83])
continue
if mne_sigla == 're':
self.re['mne'].append(self.linha[:2])
self.re['ind'].append(self.linha[4:7])
self.re['di'].append(self.linha[9:11])
self.re['hi'].append(self.linha[12:14])
self.re['mi'].append(self.linha[15:16])
self.re['df'].append(self.linha[17:19])
self.re['hf'].append(self.linha[20:22])
self.re['mf'].append(self.linha[23:24])
continue
if mne_sigla == 'lu':
self.lu['mne'].append(self.linha[:2])
self.lu['ind'].append(self.linha[4:7])
self.lu['di'].append(self.linha[8:10])
self.lu['hi'].append(self.linha[11:13])
self.lu['mi'].append(self.linha[14:15])
self.lu['df'].append(self.linha[16:18])
self.lu['hf'].append(self.linha[19:21])
self.lu['mf'].append(self.linha[22:23])
self.lu['linf'].append(self.linha[24:34])
self.lu['lsup'].append(self.linha[34:44])
continue
if mne_sigla == 'fh':
self.fh['mne'].append(self.linha[:2])
self.fh['ind'].append(self.linha[4:7])
self.fh['di'].append(self.linha[8:10])
self.fh['hi'].append(self.linha[11:13])
self.fh['mi'].append(self.linha[14:15])
self.fh['df'].append(self.linha[16:18])
self.fh['hf'].append(self.linha[19:21])
self.fh['mf'].append(self.linha[22:23])
self.fh['ush'].append(self.linha[24:27])
self.fh['unh'].append(self.linha[27:29])
self.fh['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'ft':
self.ft['mne'].append(self.linha[:2])
self.ft['ind'].append(self.linha[4:7])
self.ft['di'].append(self.linha[8:10])
self.ft['hi'].append(self.linha[11:13])
self.ft['mi'].append(self.linha[14:15])
self.ft['df'].append(self.linha[16:18])
self.ft['hf'].append(self.linha[19:21])
self.ft['mf'].append(self.linha[22:23])
self.ft['ust'].append(self.linha[24:27])
self.ft['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fi':
self.fi['mne'].append(self.linha[:2])
self.fi['ind'].append(self.linha[4:7])
self.fi['di'].append(self.linha[8:10])
self.fi['hi'].append(self.linha[11:13])
self.fi['mi'].append(self.linha[14:15])
self.fi['df'].append(self.linha[16:18])
self.fi['hf'].append(self.linha[19:21])
self.fi['mf'].append(self.linha[22:23])
self.fi['ss1'].append(self.linha[24:26])
self.fi['ss2'].append(self.linha[29:31])
self.fi['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fe':
self.fe['mne'].append(self.linha[:2])
self.fe['ind'].append(self.linha[4:7])
self.fe['di'].append(self.linha[8:10])
self.fe['hi'].append(self.linha[11:13])
self.fe['mi'].append(self.linha[14:15])
self.fe['df'].append(self.linha[16:18])
self.fe['hf'].append(self.linha[19:21])
self.fe['mf'].append(self.linha[22:23])
self.fe['num_contrato'].append(self.linha[24:27])
self.fe['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fr':
self.fr['mne'].append(self.linha[:2])
self.fr['ind'].append(self.linha[4:9])
self.fr['di'].append(self.linha[10:12])
self.fr['hi'].append(self.linha[13:15])
self.fr['mi'].append(self.linha[16:17])
self.fr['df'].append(self.linha[18:20])
self.fr['hf'].append(self.linha[21:23])
self.fr['mf'].append(self.linha[24:25])
self.fr['useol'].append(self.linha[26:31])
self.fr['fator'].append(self.linha[36:46])
continue
if mne_sigla == 'fc':
self.fc['mne'].append(self.linha[:2])
self.fc['ind'].append(self.linha[4:7])
self.fc['di'].append(self.linha[10:12])
self.fc['hi'].append(self.linha[13:15])
self.fc['mi'].append(self.linha[16:17])
self.fc['df'].append(self.linha[18:20])
self.fc['hf'].append(self.linha[21:23])
self.fc['mf'].append(self.linha[24:25])
self.fc['demanda'].append(self.linha[26:29])
self.fc['fator'].append(self.linha[36:46])
continue
if mne_sigla == 'ac':
self.ac['mne'].append(self.linha[:2])
self.ac['usi'].append(self.linha[4:7])
self.ac['mneumonico'].append(self.linha[9:15])
self.ac['ind'].append(self.linha[15:19])
self.ac['valor'].append(self.linha[19:])
continue
if mne_sigla == 'da':
self.da['mne'].append(self.linha[:2])
self.da['ind'].append(self.linha[4:7])
self.da['di'].append(self.linha[8:10])
self.da['hi'].append(self.linha[11:13])
self.da['mi'].append(self.linha[14:15])
self.da['df'].append(self.linha[16:18])
self.da['hf'].append(self.linha[19:21])
self.da['mf'].append(self.linha[22:23])
self.da['taxa'].append(self.linha[24:34])
self.da['obs'].append(self.linha[35:47])
continue
if mne_sigla == 'fp':
self.fp['mne'].append(self.linha[:2])
self.fp['usi'].append(self.linha[3:6])
self.fp['f'].append(self.linha[7:8])
self.fp['nptQ'].append(self.linha[10:13])
self.fp['nptV'].append(self.linha[15:18])
self.fp['concavidade'].append(self.linha[20:21])
self.fp['min_quadraticos'].append(self.linha[24:25])
self.fp['deltaV'].append(self.linha[29:39])
self.fp['tr'].append(self.linha[39:49])
continue
if mne_sigla == 'ez':
self.ez['mne'].append(self.linha[:2])
self.ez['usi'].append(self.linha[4:7])
self.ez['perc_vol'].append(self.linha[9:14])
continue
if mne_sigla == 'ag':
self.ag['mne'].append(self.linha[:2])
self.ag['num_estagios'].append(self.linha[3:6])
continue
if mne_sigla == 'mh':
self.mh['mne'].append(self.linha[:2])
self.mh['num'].append(self.linha[4:7])
self.mh['gr'].append(self.linha[9:11])
self.mh['id'].append(self.linha[12:14])
self.mh['di'].append(self.linha[14:16])
self.mh['hi'].append(self.linha[17:19])
self.mh['mi'].append(self.linha[20:21])
self.mh['df'].append(self.linha[22:24])
self.mh['hf'].append(self.linha[25:27])
self.mh['mf'].append(self.linha[28:29])
self.mh['f'].append(self.linha[30:31])
continue
if mne_sigla == 'mt':
self.mt['mne'].append(self.linha[:2])
self.mt['ute'].append(self.linha[4:7])
self.mt['ug'].append(self.linha[8:11])
self.mt['di'].append(self.linha[13:15])
self.mt['hi'].append(self.linha[16:18])
self.mt['mi'].append(self.linha[19:20])
self.mt['df'].append(self.linha[21:23])
self.mt['hf'].append(self.linha[24:26])
self.mt['mf'].append(self.linha[27:28])
self.mt['f'].append(self.linha[29:30])
continue
if mne_sigla == 'tx':
self.tx['mne'].append(self.linha[:2])
self.tx['taxa_fcf'].append(self.linha[4:14])
continue
if mne_sigla == 'pq':
self.pq['mne'].append(self.linha[:2])
self.pq['ind'].append(self.linha[4:7])
self.pq['nome'].append(self.linha[9:19])
self.pq['ss/b'].append(self.linha[19:24])
self.pq['di'].append(self.linha[24:26])
self.pq['hi'].append(self.linha[27:29])
self.pq['mi'].append(self.linha[30:31])
self.pq['df'].append(self.linha[32:34])
self.pq['hf'].append(self.linha[35:37])
self.pq['mf'].append(self.linha[38:39])
self.pq['geracao'].append(self.linha[40:50])
continue
if mne == 'secr':
self.secr['mne'].append(self.linha[:4])
self.secr['num'].append(self.linha[5:8])
self.secr['nome'].append(self.linha[9:21])
self.secr['usi_1'].append(self.linha[24:27])
self.secr['fator_1'].append(self.linha[28:33])
self.secr['usi_2'].append(self.linha[34:37])
self.secr['fator_2'].append(self.linha[38:43])
self.secr['usi_3'].append(self.linha[44:47])
self.secr['fator_3'].append(self.linha[48:53])
self.secr['usi_4'].append(self.linha[54:57])
self.secr['fator_4'].append(self.linha[58:63])
self.secr['usi_5'].append(self.linha[64:67])
self.secr['fator_5'].append(self.linha[68:73])
continue
if mne_sigla == 'cr':
self.cr['mne'].append(self.linha[:2])
self.cr['num'].append(self.linha[4:7])
self.cr['nome'].append(self.linha[9:21])
self.cr['gr'].append(self.linha[24:26])
self.cr['A0'].append(self.linha[27:42])
self.cr['A1'].append(self.linha[43:58])
self.cr['A2'].append(self.linha[59:74])
self.cr['A3'].append(self.linha[75:90])
self.cr['A4'].append(self.linha[91:106])
self.cr['A5'].append(self.linha[107:122])
self.cr['A6'].append(self.linha[123:138])
continue
if mne_sigla == 'r11':
self.r11['mne'].append(self.linha[:3])
self.r11['di'].append(self.linha[4:6])
self.r11['hi'].append(self.linha[7:9])
self.r11['mi'].append(self.linha[10:11])
self.r11['df'].append(self.linha[12:14])
self.r11['hf'].append(self.linha[15:17])
self.r11['mf'].append(self.linha[18:19])
self.r11['cotaIni'].append(self.linha[20:30])
self.r11['varhora'].append(self.linha[30:40])
self.r11['vardia'].append(self.linha[40:50])
self.r11['coef'].append(self.linha[59:164])
continue
if mne_sigla == 'vr':
self.vr['mne'].append(self.linha[:2])
self.vr['dia'].append(self.linha[4:6])
self.vr['mneumo_verao'].append(self.linha[9:12])
continue
if mne_sigla == 'pd':
self.pd['mne'].append(self.linha[:2])
self.pd['tol_perc'].append(self.linha[3:9])
self.pd['tol_MW'].append(self.linha[12:22])
continue
if mne_sigla == 'vm':
self.vm['mne'].append(self.linha[:2])
self.vm['ind'].append(self.linha[4:7])
self.vm['di'].append(self.linha[8:10])
self.vm['hi'].append(self.linha[11:13])
self.vm['mi'].append(self.linha[14:15])
self.vm['df'].append(self.linha[16:18])
self.vm['hf'].append(self.linha[19:21])
self.vm['mf'].append(self.linha[22:23])
self.vm['taxa_enchimento'].append(self.linha[24:34])
continue
if mne_sigla == 'df':
self.df['mne'].append(self.linha[:2])
self.df['ind'].append(self.linha[4:7])
self.df['di'].append(self.linha[8:10])
self.df['hi'].append(self.linha[11:13])
self.df['mi'].append(self.linha[14:15])
self.df['df'].append(self.linha[16:18])
self.df['hf'].append(self.linha[19:21])
self.df['mf'].append(self.linha[22:23])
self.df['taxa_descarga'].append(self.linha[24:34])
continue
if mne_sigla == 'me':
self.me['mne'].append(self.linha[:2])
self.me['ind'].append(self.linha[4:7])
self.me['di'].append(self.linha[8:10])
self.me['hi'].append(self.linha[11:13])
self.me['mi'].append(self.linha[14:15])
self.me['df'].append(self.linha[16:18])
self.me['hf'].append(self.linha[19:21])
self.me['mf'].append(self.linha[22:23])
self.me['fator'].append(self.linha[24:34])
continue
if mneumo == 'meta cjsist':
self.meta_cjsist['mneumo'].append(self.linha[:13])
self.meta_cjsist['ind'].append(self.linha[14:17])
self.meta_cjsist['nome'].append(self.linha[18:20])
continue
if mneumo == 'meta receb':
self.meta_sist['mne'].append(self.linha[:13])
self.meta_sist['ind'].append(self.linha[14:17])
self.meta_sist['tp'].append(self.linha[19:21])
self.meta_sist['num'].append(self.linha[22:23])
self.meta_sist['meta'].append(self.linha[24:34])
self.meta_sist['tol_MW'].append(self.linha[34:44])
self.meta_sist['tol_perc'].append(self.linha[44:54])
continue
if mneumo == 'meta gter':
self.meta_usit['mne'].append(self.linha[:13])
self.meta_usit['ind'].append(self.linha[14:17])
self.meta_usit['tp'].append(self.linha[19:21])
self.meta_usit['num'].append(self.linha[22:23])
self.meta_usit['meta'].append(self.linha[24:34])
self.meta_usit['tol_MW'].append(self.linha[34:44])
self.meta_usit['tol_perc'].append(self.linha[44:54])
continue
if mne_sigla == 'sh':
self.sh['mne'].append(self.linha[:2])
self.sh['flag_simul'].append(self.linha[4:5])
self.sh['flag_pl'].append(self.linha[9:10])
self.sh['num_min'].append(self.linha[14:17])
self.sh['num_max'].append(self.linha[19:22])
self.sh['flag_quebra'].append(self.linha[24:25])
self.sh['ind_1'].append(self.linha[29:32])
self.sh['ind_2'].append(self.linha[34:37])
self.sh['ind_3'].append(self.linha[39:42])
self.sh['ind_4'].append(self.linha[44:47])
self.sh['ind_5'].append(self.linha[49:52])
continue
if mne_sigla == 'tf':
self.tf['mne'].append(self.linha[:2])
self.tf['custo'].append(self.linha[4:14])
continue
if mne_sigla == 'rs':
self.rs['mne'].append(self.linha[:2])
self.rs['cod'].append(self.linha[3:6])
self.rs['ind'].append(self.linha[7:11])
self.rs['subs'].append(self.linha[12:16])
self.rs['tp'].append(self.linha[22:26])
self.rs['comentario'].append(self.linha[27:39])
continue
if mne_sigla == 'sp':
self.sp['mne'].append(self.linha[:2])
self.sp['flag'].append(self.linha[4:5])
continue
if mne_sigla == 'ps':
self.ps['mne'].append(self.linha[:2])
self.ps['flag'].append(self.linha[4:5])
continue
if mne_sigla == 'pp':
self.pp['mne'].append(self.linha[:2])
self.pp['flag'].append(self.linha[3:4])
self.pp['iteracoes'].append(self.linha[5:8])
self.pp['num'].append(self.linha[9:12])
self.pp['tp'].append(self.linha[13:14])
continue
except Exception as err:
if isinstance(err, StopIteration):
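# End of file: StopIteration (presumably raised by self.next_line once the file is
# exhausted) is the cue to convert every accumulated record dict into its block DataFrame.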
self.bloco_tm['df'] = pd.DataFrame(self.tm)
self.bloco_sist['df'] = pd.DataFrame(self.sist)
self.bloco_ree['df'] = pd.DataFrame(self.ree)
self.bloco_uh['df'] = pd.DataFrame(self.uh)
self.bloco_tviag['df'] = pd.DataFrame(self.tviag)
self.bloco_ut['df'] = pd.DataFrame(self.ut)
self.bloco_usie['df'] = pd.DataFrame(self.usie)
self.bloco_dp['df'] = pd.DataFrame(self.dp)
self.bloco_de['df'] = pd.DataFrame(self.de)
self.bloco_cd['df'] = pd.DataFrame(self.cd)
self.bloco_ri['df'] = pd.DataFrame(self.ri)
self.bloco_ia['df'] = pd.DataFrame(self.ia)
self.bloco_rd['df'] = pd.DataFrame(self.rd)
self.bloco_rivar['df'] = pd.DataFrame(self.rivar)
self.bloco_it['df'] = pd.DataFrame(self.it)
self.bloco_gp['df'] = pd.DataFrame(self.gp)
self.bloco_ni['df'] = pd.DataFrame(self.ni)
self.bloco_ve['df'] = | pd.DataFrame(self.ve) | pandas.DataFrame |
"""
Copyright 2018 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from testfixtures import Replacer
from testfixtures.mock import Mock
from gs_quant.timeseries import EdrDataReference
from gs_quant.timeseries.backtesting import Basket, basket_series, MqValueError, MqTypeError, RebalFreq, date, \
DataContext, np
def test_basket_series():
dates = [
datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
datetime.datetime(2019, 1, 3),
datetime.datetime(2019, 1, 4),
datetime.datetime(2019, 1, 5),
datetime.datetime(2019, 1, 6),
]
x = pd.Series([100.0, 101, 103.02, 100.9596, 100.9596, 102.978792], index=dates)
y = pd.Series([100.0, 100, 100, 100, 100, 100], index=dates)
assert_series_equal(x, basket_series([x], [1]))
assert_series_equal(x, basket_series([x, x], [0.5, 0.5]))
assert_series_equal(x, basket_series([x, x, x], [1 / 3, 1 / 3, 1 / 3]))
assert_series_equal(x, basket_series([x, y], [1, 0]))
assert_series_equal(y, basket_series([x, y], [0, 1]))
with pytest.raises(MqValueError):
basket_series([x, y], [1])
with pytest.raises(MqTypeError):
basket_series([1, 2, 3], [1])
dates = [
datetime.datetime(2019, 1, 1),
datetime.datetime(2019, 1, 2),
datetime.datetime(2019, 1, 3),
datetime.datetime(2019, 1, 4),
datetime.datetime(2019, 1, 5),
datetime.datetime(2019, 1, 6),
datetime.datetime(2019, 2, 1),
datetime.datetime(2019, 2, 2),
datetime.datetime(2019, 2, 3),
datetime.datetime(2019, 2, 4),
datetime.datetime(2019, 2, 5),
datetime.datetime(2019, 2, 6),
]
mreb = pd.Series(
[100.0, 101, 103.02, 100.9596, 100.9596, 102.978792,
100.0, 101, 103.02, 100.9596, 100.9596, 102.978792],
index=dates)
assert_series_equal(mreb, basket_series([mreb], [1], rebal_freq=RebalFreq.MONTHLY))
def _mock_spot_data():
dates = pd.date_range(start='2021-01-01', periods=6)
x = pd.DataFrame({'spot': [100.0, 101, 103.02, 100.9596, 100.9596, 102.978792]}, index=dates)
x['assetId'] = 'MA4B66MW5E27U9VBB94'
y = pd.DataFrame({'spot': [100.0, 100, 100, 100, 100, 100]}, index=dates)
y['assetId'] = 'MA4B66MW5E27UAL9SUX'
return x.append(y)
def _mock_spot_data_feb():
dates_feb = pd.date_range(start='2021-02-01', periods=6)
x = pd.DataFrame({'spot': [100.0, 101.5, 106.02, 100.1, 105.3, 102.9]}, index=dates_feb)
x['assetId'] = 'MA4B66MW5E27U9VBB94'
y = pd.DataFrame({'spot': [100.0, 101.5, 100.02, 98.1, 95.3, 93.9]}, index=dates_feb)
y['assetId'] = 'MA4B66MW5E27UAL9SUX'
return x.append(y)
def test_basket_price():
with pytest.raises(MqValueError):
Basket(['AAPL UW'], [0.1, 0.9], RebalFreq.MONTHLY)
dates = pd.DatetimeIndex([date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3), date(2021, 1, 4), date(2021, 1, 5),
date(2021, 1, 6)])
dates_feb = pd.DatetimeIndex([date(2021, 2, 1), date(2021, 2, 2), date(2021, 2, 3), date(2021, 2, 4),
date(2021, 2, 5), date(2021, 2, 6)])
replace = Replacer()
mock_data = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_data.side_effect = [_mock_spot_data(), _mock_spot_data_feb()]
mock_asset = replace('gs_quant.timeseries.backtesting.GsAssetApi.get_many_assets_data', Mock())
mock_asset.return_value = [{'id': 'MA4B66MW5E27U9VBB94', 'bbid': 'AAPL UW'},
{'id': 'MA4B66MW5E27UAL9SUX', 'bbid': 'MSFT UW'}]
a_basket = Basket(['AAPL UW', 'MSFT UW'], [0.1, 0.9], RebalFreq.MONTHLY)
expected = pd.Series([100.0, 100.1, 100.302, 100.09596, 100.09596, 100.297879], index=dates)
with DataContext('2021-01-01', '2021-01-06'):
actual = a_basket.price()
| assert_series_equal(actual, expected) | pandas.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 16:44:24 2020
@author: Borja
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
"""
- Ultra Trail Mont Blanc. Rankings from 2003 to 2017.
https://www.kaggle.com/ceruleansea/ultratrail-du-montblanc-20032017?select=utmb_2017.csv
- Ultra Trail Mont Blanc. Rankings from 2017 to 2019.
https://www.kaggle.com/purpleyupi/utmb-results
Data saved in 'Data/csv/*.csv'
"""
utmb_2003 = pd.read_csv('Data/csv/utmb_2003.csv', sep=',', decimal='.')
utmb_2004 = pd.read_csv('Data/csv/utmb_2004.csv', sep=',', decimal='.')
utmb_2005 = pd.read_csv('Data/csv/utmb_2005.csv', sep=',', decimal='.')
utmb_2006 = pd.read_csv('Data/csv/utmb_2006.csv', sep=',', decimal='.')
utmb_2007 = pd.read_csv('Data/csv/utmb_2007.csv', sep=',', decimal='.')
utmb_2008 = pd.read_csv('Data/csv/utmb_2008.csv', sep=',', decimal='.')
utmb_2009 = pd.read_csv('Data/csv/utmb_2009.csv', sep=',', decimal='.')
utmb_2010 = pd.read_csv('Data/csv/utmb_2010.csv', sep=',', decimal='.')
utmb_2011 = pd.read_csv('Data/csv/utmb_2011.csv', sep=',', decimal='.')
utmb_2012 = pd.read_csv('Data/csv/utmb_2012.csv', sep=',', decimal='.')
utmb_2013 = pd.read_csv('Data/csv/utmb_2013.csv', sep=',', decimal='.')
utmb_2014 = pd.read_csv('Data/csv/utmb_2014.csv', sep=',', decimal='.')
utmb_2015 = pd.read_csv('Data/csv/utmb_2015.csv', sep=',', decimal='.')
utmb_2016 = pd.read_csv('Data/csv/utmb_2016.csv', sep=',', decimal='.')
utmb_2017 = pd.read_csv('Data/csv/utmb_2017.csv', sep=',', decimal='.')
"""
The data obtained from the second source stores, in a single cell, the runner's
name followed by a space and the name of the team they belong to.
"""
def clean_data(utmb_year):
name_s2 = utmb_year["name"]
nationality_s2 = utmb_year["nationality"]
time_s2 = utmb_year["time"]
data_cleaned_year = pd.DataFrame({"name": name_s2, "nationality": nationality_s2, "time": time_s2})
del(name_s2)
del(nationality_s2)
del(time_s2)
return data_cleaned_year
def rename_column(utmb_year, string_old, string_new):
utmb_year = utmb_year.rename(columns={string_old:string_new})
return utmb_year
def clean_and_rename_data(utmb_year, string_new):
utmb_year_clean = clean_data(utmb_year)
utmb_year_clean = rename_column(utmb_year_clean, 'time', string_new)
return utmb_year_clean
utmb_clean_2003 = clean_and_rename_data(utmb_2003, 'time_2003')
utmb_clean_2004 = clean_and_rename_data(utmb_2004, 'time_2004')
utmb_clean_2005 = clean_and_rename_data(utmb_2005, 'time_2005')
utmb_clean_2006 = clean_and_rename_data(utmb_2006, 'time_2006')
utmb_clean_2007 = clean_and_rename_data(utmb_2007, 'time_2007')
utmb_clean_2008 = clean_and_rename_data(utmb_2008, 'time_2008')
utmb_clean_2009 = clean_and_rename_data(utmb_2009, 'time_2009')
utmb_clean_2010 = clean_and_rename_data(utmb_2010, 'time_2010')
utmb_clean_2011 = clean_and_rename_data(utmb_2011, 'time_2011')
utmb_clean_2012 = clean_and_rename_data(utmb_2012, 'time_2012')
utmb_clean_2013 = clean_and_rename_data(utmb_2013, 'time_2013')
utmb_clean_2014 = clean_and_rename_data(utmb_2014, 'time_2014')
utmb_clean_2015 = clean_and_rename_data(utmb_2015, 'time_2015')
utmb_clean_2016 = clean_and_rename_data(utmb_2016, 'time_2016')
utmb_clean_2017 = clean_and_rename_data(utmb_2017, 'time_2017')
"""
Merge all the yearly results into a single data frame.
"""
utmb_clean_multiyear = pd.merge(utmb_clean_2003, utmb_clean_2004,how='outer',on=['name', 'nationality'], copy=True)
utmb_clean_multiyear = pd.merge(utmb_clean_multiyear, utmb_clean_2005,how='outer',on=['name', 'nationality'], copy=True)
utmb_clean_multiyear = pd.merge(utmb_clean_multiyear, utmb_clean_2006,how='outer',on=['name', 'nationality'], copy=True)
utmb_clean_multiyear = pd.merge(utmb_clean_multiyear, utmb_clean_2007,how='outer',on=['name', 'nationality'], copy=True)
utmb_clean_multiyear = pd.merge(utmb_clean_multiyear, utmb_clean_2008,how='outer',on=['name', 'nationality'], copy=True)
utmb_clean_multiyear = pd.merge(utmb_clean_multiyear, utmb_clean_2009,how='outer',on=['name', 'nationality'], copy=True)
utmb_clean_multiyear = pd.merge(utmb_clean_multiyear, utmb_clean_2010,how='outer',on=['name', 'nationality'], copy=True)
utmb_clean_multiyear = | pd.merge(utmb_clean_multiyear, utmb_clean_2011,how='outer',on=['name', 'nationality'], copy=True) | pandas.merge |
#### ----- LIBRARIES ----- ####
import os
import sys
import time
import random
import shutil
import time
import smtplib
import ssl
import pandas as pd
from selenium import webdriver
from bs4 import BeautifulSoup
#### ----- FUNCTIONS DEFINITION ----- ####
## ----- Core scraping functions ----- ##
def get_profile_info():
'''Scrape username, number of posts, followers and following. Returns the number of followers and
a string formatted to be saved as the line of a csv file.'''
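# NOTE: the XPaths below are tied to the Instagram page layout at the time of writing
# and will need updating whenever the site's DOM changes.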
username_xpath = '//*[@id="react-root"]/section/main/div/header/section/div[1]/h1'
post_xpath = '//*[@id="react-root"]/section/main/div/header/section/ul/li[1]/span/span'
followers_xpath = '//*[@id="react-root"]/section/main/div/header/section/ul/li[2]/a/span'
following_xpath = '//*[@id="react-root"]/section/main/div/header/section/ul/li[3]/a/span'
username_element = driver.find_element_by_xpath(username_xpath)
post_element = driver.find_element_by_xpath(post_xpath)
followers_element = driver.find_element_by_xpath(followers_xpath)
following_element = driver.find_element_by_xpath(following_xpath)
username = username_element.get_attribute('innerText')
post = post_element.get_attribute('innerText').replace(',', '')
followers = followers_element.get_attribute('innerText').replace(',', '')
following = following_element.get_attribute('innerText').replace(',', '')
print('Scraping metadata for %s...'% username)
time.sleep(random.randint(1,3))
string = username + ',' + post + ',' + followers + ',' + following
return int(followers), string
def get_followers_list():
'''Parse the html of the page to get the list of followers - plus a lot of html rubbish. Then iterate through
the list to clean up the rubbish and add the usernames to a set. Return the set as a list.'''
# Parse the html of the page
followers_list_xpath = '/html/body/div[3]/div/div/div[2]/ul/div'
followers_list_element = driver.find_element_by_xpath(followers_list_xpath)
followers_list_html = followers_list_element.get_attribute('innerHTML')
followers_list_parsed = BeautifulSoup(followers_list_html, "html.parser").find_all('a')
# Save the list of followers in a set
followers_set = set()
for follower in followers_list_parsed:
username = follower.get('href').replace('/', '')
followers_set.add(username)
print('\nFollowers list returned successfully!')
return list(followers_set)
def profile_scraper(username):
'''Wrapper for the functions that together scrape the profile, write the metadata
and return the list of followers.'''
go_to_profile(username)
n_followers, profile_info = get_profile_info()
write_metadata(profile_info)
print('-'*30)
followers_window = get_followers_window()
scroll_followers_window(n_followers, followers_window)
followers_list = get_followers_list()
print('-'*30)
return followers_list
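# Example usage (sketch; assumes the global `driver` was already created via start_browser()
# and a session opened with login()):
#     followers = profile_scraper('some_username')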
## ----- Browser related functions ----- ##
def start_browser():
'''Initiates a browser window and load instagram.com'''
driver_path = '/Users/andrea/Desktop/Instagram/chromedriver'
driver = webdriver.Chrome(executable_path=driver_path)
driver.set_window_size(1000,1000)
driver.get("https://www.instagram.com/accounts/login/")
print('-'*30)
print('Starting browser...')
time.sleep(10)
print('Browser started successfully!')
return driver
def login(username, password):
'''Login to instagram with the given credentials'''
# Fill in username
user_box = driver.find_element_by_name("username")
user_box.click()
user_box.send_keys(username)
time.sleep(random.randint(1,5))
# Fill in password
psw_box = driver.find_element_by_name("password")
psw_box.click()
psw_box.send_keys(password)
time.sleep(random.randint(1,5))
# Click login button
login_button_xpath = '//*[@id="react-root"]/section/main/div/article/div/div[1]/div/form/div[3]'
login_button = driver.find_element_by_xpath(login_button_xpath)
login_button.click()
print('Logging in to instagram...')
time.sleep(random.randint(1,5))
def go_to_profile(username):
'''Go to the profile page of a given user'''
print('Loading the profile of %s...'% username)
driver.get('https://www.instagram.com/' + username)
time.sleep(2)
def get_followers_window():
'''Click on the follower button and return the follower window element'''
followers_button_xpath = '//*[@id="react-root"]/section/main/div/header/section/ul/li[2]'
followers_window_xpath = '/html/body/div[3]/div/div/div[2]'
followers_button_element = driver.find_element_by_xpath(followers_button_xpath)
followers_button_element.click()
time.sleep(random.randint(1,3))
followers_window_element = driver.find_element_by_xpath(followers_window_xpath)
print('Getting followers window...')
return followers_window_element
def scroll_followers_window(n_followers, followers_window):
'''Use a js script to scroll the followers window until the end, so that the names of all the followers
are loaded in the html of the page'''
initial_scroll_height = 100
scroll_height = initial_scroll_height
n_followers_loaded = 0
n_followers_constant = 0
followers_list_xpath = '/html/body/div[3]/div/div/div[2]/ul/div'
while n_followers_loaded < n_followers:
# Build and execute the js script
script = "arguments[0].scrollTop = " + str(scroll_height)
driver.execute_script(script, followers_window)
# Save the number of followers loaded before this iteration
n_followers_loaded_before = n_followers_loaded
# Update the number of followers loaded
followers_list_element = driver.find_element_by_xpath(followers_list_xpath)
followers_list_html = followers_list_element.get_attribute('innerHTML')
followers_list_parsed = BeautifulSoup(followers_list_html, "html.parser")
n_followers_loaded = len(followers_list_parsed.find_all('li'))
# Count for how many iterations the number of followers loaded has remained constant
if n_followers_loaded_before == n_followers_loaded:
n_followers_constant += 1
else:
n_followers_constant = 0
# Break the while loop if the number of followers has remained constant for too long
if n_followers_constant > 100:
write_error(kind='timeout')
break
# Increase scroll height. Start slowly, increase speed after some iterations
if scroll_height <= 1000:
scroll_height = scroll_height + random.randint(50, 150)
else:
scroll_height = scroll_height + random.randint(500, 1500)
sys.stdout.write('\rScrolling followers window: %s of %s followers currently loaded.' % (n_followers_loaded, n_followers))
time.sleep(random.randint(1,3))
## ----- Gephi related functions ----- ##
def merge_all_csv():
'''Read all the single csv files scraped and merge them all in a single dataframe'''
csv_names_list = os.listdir(cwd + '/data/followers')
merged_df = pd.DataFrame(columns=['followers', 'username'])
for csv in csv_names_list:
temp_df = pd.read_csv(cwd + '/data/followers/%s' %csv)
merged_df = | pd.concat([merged_df, temp_df], axis=0) | pandas.concat |
# coding=utf-8
# Author: <NAME>
# Date: Sept 02, 2019
#
# Description: Reads a MultiLayer network (HS, MM & DM) and prints information.
#
#
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import networkx as nx
from utils import get_network_layer, get_network_by_attribute, ensurePathExists
from tabulate import tabulate
from itertools import combinations
def df2md(df, y_index=False, *args, **kwargs):
blob = tabulate(df, headers='keys', tablefmt='pipe', *args, **kwargs)
if not y_index:
return '\n'.join(['| {}'.format(row.split('|', 2)[-1]) for row in blob.split('\n')])
return blob
if __name__ == '__main__':
celltypes = ['spermatocyte', 'spermatogonia', 'spermatid', 'enterocyte', 'neuron', 'muscle']
#
# Node/Edge stats on complete network
#
r = []
for celltype in celltypes:
print('Loading {celltype:s} network'.format(celltype=celltype))
rGfile_gpickle = '../../04-network/results/network/{celltype:s}/net-{celltype:s}-{network:s}.gpickle'.format(celltype=celltype, network='full')
G = nx.read_gpickle(rGfile_gpickle)
for layer in ['HS', 'MM', 'DM']:
print('Separate {layer:s} layer'.format(layer=layer))
Gt = get_network_layer(G, layer)
# Number of nodes/edges
n_nodes = Gt.number_of_nodes()
n_edges = Gt.number_of_edges()
r.append((celltype, layer, n_nodes, n_edges))
print('# Number of nodes/edges in each layer of the full network\n')
df_stat = pd.DataFrame(r, columns=['celltype', 'species', '#-nodes', '#-edges'])
print(df2md(df_stat, floatfmt='.4f'))
file = 'results/stats-full-network.csv'
ensurePathExists(file)
df_stat.to_csv(file)
#
# Node/Edge stats on threshold/conserved Network
#
network = 'conserved' # ['thr', 'conserved']
threshold = 0.5
threshold_str = str(threshold).replace('.', 'p')
#
if network == 'conserved':
celltypes = ['spermatocyte', 'enterocyte']
else:
celltypes = ['spermatocyte', 'spermatogonia', 'spermatid', 'enterocyte', 'neuron', 'muscle']
#
r = []
for celltype in celltypes:
print('Reading {celltype:s}-{network:s}-{threshold:s} network'.format(celltype=celltype, network=network, threshold=threshold_str))
path_net = '../../04-network/results/network/{celltype:s}/'.format(celltype=celltype)
if network in ['thr', 'conserved']:
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}-{threshold:s}.gpickle'.format(celltype=celltype, network=network, threshold=threshold_str)
elif network == 'full':
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}.gpickle'.format(celltype=celltype, network=network)
G = nx.read_gpickle(rGfile_gpickle)
for layer in ['HS', 'MM', 'DM']:
print('Separate {layer:s} layer'.format(layer=layer))
Gt = get_network_layer(G, layer)
# Number of nodes/edges
n_nodes = Gt.number_of_nodes()
n_edges = Gt.number_of_edges()
# Number of components (islands)
n_components = nx.number_connected_components(Gt)
# Largest Component
Gtlc = max(nx.connected_component_subgraphs(Gt), key=len)
n_nodes_largest_component = Gtlc.number_of_nodes()
n_edges_largest_component = Gtlc.number_of_edges()
for weight in ['combined_score', 'textmining', 'database', 'experiments', 'coexpression', 'cooccurence', 'fusion', 'neighborhood']:
edges = [(i, j) for i, j, d in Gt.edges(data=True) if weight in d]
Gtw = Gt.edge_subgraph(edges).copy()
# Number of nodes/edges
n_nodes = Gtw.number_of_nodes()
n_edges = Gtw.number_of_edges()
# Number of components (islands)
n_components = nx.number_connected_components(Gtw)
# Largest Component
if n_edges > 0:
Gtlc = max(nx.connected_component_subgraphs(Gtw), key=len)
n_nodes_largest_component = Gtlc.number_of_nodes()
n_edges_largest_component = Gtlc.number_of_edges()
else:
n_nodes_largest_component = 0
n_edges_largest_component = 0
r.append((celltype, layer, weight, n_nodes, n_edges, n_components, n_nodes_largest_component, n_edges_largest_component))
print('# Number of nodes/edges in the layer of the thresholded>0.5 network\n')
df_stat = pd.DataFrame(r, columns=['celltype', 'species', 'edge-type', '#-nodes', '#-edges', '#-comps.', '#-nodes-in-lgt-comp.', '#-edges-lgt-comp.'])
print(df2md(df_stat, floatfmt='.4f'))
if network in ['thr', 'conserved']:
file = 'results/stats-{network:s}-{threshold:s}-network.csv'.format(network=network, threshold=threshold_str)
elif network == 'full':
file = 'results/stats-{network:s}-network.csv'.format(network=network)
ensurePathExists(file)
df_stat.to_csv(file)
#
# Pairwise conserved genes
#
network = 'thr'
threshold = 0.5
threshold_str = str(threshold).replace('.', 'p')
biotype = 'protein_coding'
r = []
for celltype in celltypes:
#
print('Loading {celltype:s}-{network:s} Network'.format(celltype=celltype, network=network))
path_net = '../../04-network/results/network/{celltype:s}/'.format(celltype=celltype)
if network == 'thr':
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}-{threshold:s}.gpickle'.format(celltype=celltype, network=network, threshold=threshold_str)
elif network == 'full':
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}.gpickle'.format(celltype=celltype, network=network)
G = nx.read_gpickle(rGfile_gpickle)
print('Separate Layers')
HSG = get_network_layer(G, 'HS')
MMG = get_network_layer(G, 'MM')
DMG = get_network_layer(G, 'DM')
print("Select nodes where biotype='{biotype:s}'".format(biotype=biotype))
HSG = get_network_by_attribute(HSG, attribute='biotype', value=biotype)
MMG = get_network_by_attribute(MMG, attribute='biotype', value=biotype)
DMG = get_network_by_attribute(DMG, attribute='biotype', value=biotype)
# Pairs
for (layer_i, Gi), (layer_j, Gj) in combinations([('HS', HSG), ('MM', MMG), ('DM', DMG)], 2):
print("Comparing: {layer_i:s} with {layer_j:s}".format(layer_i=layer_i, layer_j=layer_j))
pair = layer_i + 'x' + layer_j
genes_i = [*Gi.nodes()]
genes_j = [*Gj.nodes()]
genes_ij = genes_i + genes_j
# Only genes between these species
Gtmp = nx.subgraph(G, genes_ij).copy()
# Remove intra edges
remove_intra_edges = [(i, j) for i, j, d in Gtmp.edges(data=True) if d.get('type', None) == 'intra']
Gtmp.remove_edges_from(remove_intra_edges)
# Remove isolates
remove_isolates_nodes = list(nx.isolates(Gtmp))
Gtmp.remove_nodes_from(remove_isolates_nodes)
# Keep only homologs
Gitmp = nx.subgraph(Gi, Gtmp).copy()
Gjtmp = nx.subgraph(Gj, Gtmp).copy()
# Number of nodes/edges
n_nodes_i = Gitmp.number_of_nodes()
n_nodes_j = Gjtmp.number_of_nodes()
n_edges_i = Gitmp.number_of_edges()
n_edges_j = Gjtmp.number_of_edges()
r.append((celltype, pair, layer_i, n_nodes_i, n_edges_i, layer_j, n_nodes_j, n_edges_j))
print('# Pairwise number of conserved nodes/edges of the full network\n')
df_stat = | pd.DataFrame(r, columns=['celltype', 'specie-pair', 'layer-i', '#-nodes-i', '#-edges-i', 'layer-j', '#-nodes-j', '#-edges-j']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from data import *
from resource import *
from utils import (load_dataframe, explode, merge_all, convert_dtype)
from utils import mock_name as _mm
f_item_meta_gte_50000 = FeatResource(_mm('item_metadata_gte_50000'))
f_item_int_cnt = FeatResource(_mm('item_interaction_cnt'))
f_item_expo_cnt = FeatResource(_mm('item_exposure_count'))
f_sess_basic = TrainTestResource(FeatResource, _mm('session_basic_%s'))
f_sess_int = TrainTestResource(FeatResource, _mm('session_interaction_%s'))
f_sess_imp_eq = TrainTestResource(FeatResource, _mm('session_impressions_eq_%s'))
f_sess_imp = TrainTestResource(FeatResource, _mm('session_imp_%s'))
f_sess_int_price = TrainTestResource(FeatResource, _mm('session_interaction_price_%s'))
f_sess_le = TrainTestResource(FeatResource, _mm('session_label_encode_%s'))
f_si_basic = TrainTestResource(FeatResource, _mm('session_item_basic_%s'))
f_si_first_last = TrainTestResource(FeatResource, _mm('session_item_first_last_%s'))
f_si_int = TrainTestResource(FeatResource, _mm('session_item_interaction_%s'))
f_si_diff_last_int = TrainTestResource(FeatResource, _mm('session_item_diff_last_interaction_index_%s'))
f_si_diff_imp_price = TrainTestResource(FeatResource, _mm('session_item_price_div_imp_price_%s'))
f_top30 = TrainTestResource(FeatResource, _mm('top_30_feats_%s'))
f_top100 = TrainTestResource(FeatResource, _mm('top_100_feats_%s'))
f_si_sim = TrainTestResource(FeatResource, _mm('similarity_pair_%s'))
f_si_cmp = TrainTestResource(FeatResource, _mm('compare_pair_%s'))
f_si_win = TrainTestResource(FeatResource, _mm('win_pair_%s'))
@register(out=f_item_meta_gte_50000, inp=t_item_metadata_sp)
def item_metadata_gte_50000():
dd = t_item_metadata_sp.load()
cols_meta = [col for col in dd.columns if col != 'item_id']
meta_count = dd[cols_meta].sum()
meta_keep = list(meta_count[meta_count >= 50000].index)
dd = dd[['item_id'] + meta_keep]
dd['item_id'] = dd['item_id'].astype(int)
for col in meta_keep:
dd[col] = dd[col].astype('float32')
f_item_meta_gte_50000.save(dd)
@register(out=f_item_int_cnt, inp=i_tr_te)
def item_interaction_cnt():
cols_keep = ['user_id', 'session_id', 'reference', 'action_type']
df_train = i_tr_te.train.load(columns=cols_keep)
df_test = i_tr_te.test.load(columns=cols_keep)
df = | pd.concat([df_train, df_test]) | pandas.concat |
import argparse
import os
from io import BytesIO
from time import ctime
from urllib.parse import urljoin
from urllib.request import urlopen
from zipfile import ZipFile
import boto3
import pandas as pd
from column_map_benchmark import benchmark_column_map
from foreign_key_benchmark import benchmark_foreign_key
from how_lineage_benchmark import benchmark_how_lineage
from primary_key_benchmark import benchmark_primary_key
BUCKET_NAME = 'tracer-data'
DATA_URL = 'http://{}.s3.amazonaws.com/'.format(BUCKET_NAME)
def download(data_dir):
"""Download benchmark datasets from S3.
This downloads the benchmark datasets from S3 into the target folder in an
uncompressed format. It skips datasets that have already been downloaded.
Please make sure an appropriate S3 credential is installed before you call
this method.
Args:
data_dir: The directory to download the datasets to.
Returns:
A DataFrame describing the downloaded datasets.
Raises:
NoCredentialsError: If AWS S3 credentials are not found.
"""
rows = []
client = boto3.client('s3')
for dataset in client.list_objects(Bucket=BUCKET_NAME)['Contents']:
if '.zip' not in dataset['Key']:
continue
rows.append(dataset)
dataset_name = dataset['Key'].replace(".zip", "")
dataset_path = os.path.join(data_dir, dataset_name)
if os.path.exists(dataset_path):
dataset["Status"] = "Skipped"
print("Skipping %s" % dataset_name)
else:
dataset["Status"] = "Downloaded"
print("Downloading %s" % dataset_name)
with urlopen(urljoin(DATA_URL, dataset['Key'])) as fp:
with ZipFile(BytesIO(fp.read())) as zipfile:
zipfile.extractall(dataset_path)
return pd.DataFrame(rows)
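# Example usage (sketch; requires AWS credentials with read access to the bucket):
#     datasets = download('data')
#     print(datasets[['Key', 'Status']])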
def start_with(target, source):
return len(source) <= len(target) and target[:len(source)] == source
def aggregate(cmd_name):
cmd_abbrv = {'column': 'ColMap_st',
'foreign': 'ForeignKey_st',
'primary': 'PrimaryKey_st'
}
if cmd_name not in cmd_abbrv:
print("Invalid command name!")
return None # invalid command name
cmd_name = cmd_abbrv[cmd_name]
dfs = []
for file in os.listdir("Reports"):
if start_with(file, cmd_name):
dfs.append(pd.read_csv("Reports/" + file))
if len(dfs) == 0:
print("No available test results!")
return None
df = | pd.concat(dfs, axis=0, ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
##########
Processing
##########
*Created on Thu Jun 1 14:15 2017 by <NAME>*
Processing results from the CellPainting Assay in the Jupyter notebook.
This module provides the DataSet class and its methods.
Additional functions in this module act on pandas DataFrames."""
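# Typical interactive use (sketch only; the file names are placeholders):
#     ds = DataSet()
#     ds.load("plate_results.tsv")                     # tab-separated result file(s)
#     ds = ds.well_type_from_position().flag_toxic()
#     ds.write_csv("plate_processed.tsv")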
import time
import glob
import os.path as op
from collections import Counter
import xml.etree.ElementTree as ET
import pickle
import pandas as pd
import numpy as np
from rdkit.Chem import AllChem as Chem
from rdkit import DataStructs
from IPython.core.display import HTML
from . import tools as cpt
from .config import ACT_PROF_PARAMETERS
from .config import LIMIT_SIMILARITY_L, LIMIT_CELL_COUNT_L, LIMIT_ACTIVITY_L
try:
from misc_tools import apl_tools
AP_TOOLS = True
#: Library version
VERSION = apl_tools.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} (commit: {})".format(__name__, VERSION))
except ImportError:
AP_TOOLS = False
print("{:45s} ({})".format(__name__, time.strftime("%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
try:
from . import resource_paths as cprp
except ImportError:
from . import resource_paths_templ as cprp
print("* Resource paths not found, stub loaded.")
print(" Automatic loading of resources will not work,")
print(" please have a look at resource_paths_templ.py")
FINAL_PARAMETERS = ['Metadata_Plate', 'Metadata_Well', 'plateColumn', 'plateRow',
"Compound_Id", 'Container_Id', "Well_Id", "Producer", "Pure_Flag", "Toxic",
"Rel_Cell_Count", "Known_Act", "Trivial_Name", 'WellType', 'Conc_uM',
"Activity", "Act_Profile", "Plate", "Smiles"]
DROP_FROM_NUMBERS = ['plateColumn', 'plateRow', 'Conc_uM', "Compound_Id"]
DROP_GLOBAL = ["PathName_CellOutlines", "URL_CellOutlines", 'FileName_CellOutlines',
'ImageNumber', 'Metadata_Site', 'Metadata_Site_1', 'Metadata_Site_2']
QUANT = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
DEBUG = False
def debug_print(txt, val):
if DEBUG:
txt = txt + ":"
print("DEBUG {:20s}".format(txt), val)
class DataSet():
def __init__(self, log=True):
self.data = pd.DataFrame()
self.fields = {"plateColumn": "Metadata_Plate",
"WellType": "WellType", "ControlWell": "Control", "CompoundWell": "Compound"}
self.log = log
def __getitem__(self, item):
res = self.data[item]
if isinstance(res, pd.DataFrame):
result = DataSet()
result.data = res
result.print_log("subset")
else:
result = res
return result
def __getattr__(self, name):
"""Try to call undefined methods on the underlying pandas DataFrame."""
def method(*args, **kwargs):
res = getattr(self.data, name)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
result = DataSet()
result.data = res
result.print_log(name)
else:
result = res
return result
return method
def show(self):
parameters = [k for k in FINAL_PARAMETERS if k in self.data]
print("Shape: ", self.shape)
print("Parameters:", parameters)
return HTML(self.data[parameters]._repr_html_())
def head(self, n=5):
parameters = [k for k in FINAL_PARAMETERS if k in self.data]
res = self.data[parameters].head(n)
result = DataSet()
result.data = res
result.print_log("head")
return result
def drop_cols(self, cols, inplace=False):
"""Drops the list of columns from the DataFrame.
Listed columns that are not present in the DataFrame are simply ignored
(no error is thrown)."""
if inplace:
drop_cols(self.data, cols, inplace=True)
self.print_log("drop cols (inplace)")
else:
result = DataSet()
result.data = drop_cols(self.data, cols, inplace=False)
result.print_log("drop cols")
return result
def keep_cols(self, cols, inplace=False):
if inplace:
self.data = self.data[cols]
self.print_log("keep cols (inplace)")
else:
result = DataSet()
result.data = self.data[cols]
result.print_log("keep cols")
return result
def print_log(self, component, add_info=""):
if self.log:
print_log(self.data, component, add_info)
def load(self, fn, sep="\t"):
"""Read one or multiple result files and concatenate them into one dataset.
`fn` is a single filename (string) or a list of filenames."""
self.data = load(fn, sep=sep).data
self.print_log("load data")
def write_csv(self, fn, parameters=None, sep="\t"):
result = self.data.copy()
if isinstance(parameters, list):
result = result[parameters]
result.to_csv(fn, sep=sep, index=False)
def write_pkl(self, fn):
self.data.to_pickle(fn)
def write_parameters(self, fn="parameters.txt"):
parameters = sorted(self.measurements)
with open("parameters.txt", "w") as f:
f.write('"')
f.write('",\n"'.join(parameters))
f.write('"')
print(len(parameters), "parameters written.")
def describe(self, times_mad=3.0):
df = numeric_parameters(self.data)
stats = pd.DataFrame()
stats["Min"] = df.min()
stats["Max"] = df.max()
stats["Median"] = df.median()
stats["MAD"] = df.mad()
stats["Outliers"] = df[(((df - df.median()).abs() - times_mad * df.mad()) > 0)].count()
print(self.shape)
return stats
def well_type_from_position(self):
"""Assign the WellType from the position on the plate.
        Controls are in columns 11 and 12."""
result = DataSet(log=self.log)
result.data = well_type_from_position(self.data)
result.print_log("well type from pos")
return result
def well_from_position(self, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Assign Metadata_Well from plateRow, plateColumn"""
result = DataSet(log=self.log)
result.data = well_from_position(self.data, well_name=well_name,
row_name=row_name, col_name=col_name)
result.print_log("well from pos")
return result
def position_from_well(self, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Generate plateRow and plateColumn from Metatadata_Well"""
result = DataSet(log=self.log)
result.data = position_from_well(self.data, well_name=well_name,
row_name=row_name, col_name=col_name)
result.print_log("pos from well")
return result
def join_layout_384(self, layout_fn, on="Address_384"):
result = DataSet(log=self.log)
result.data = join_layout_384(self.data, layout_fn, on=on)
result.print_log("join layout 384")
return result
def join_layout_1536(self, plate, quadrant, on="Address_384", how="inner"):
"""Cell Painting is always run in 384er plates.
COMAS standard screening plates are format 1536.
With this function, the 1536-to-384 reformatting file
with the smiles added by join_smiles_to_layout_1536()
can be used directly to join the layout to the individual 384er plates."""
result = DataSet(log=self.log)
result.data = join_layout_1536(self.data, plate, quadrant, on=on, how=how)
result.print_log("join layout 1536")
return result
def numeric_parameters(self):
result = DataSet()
result.data = numeric_parameters(self.data)
return result
def flag_toxic(self, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Flag data rows of toxic compounds"""
result = DataSet()
result.data = flag_toxic(self.data, cutoff=cutoff)
flagged = result.data["Toxic"].sum()
result.print_log("flag toxic", "{:3d} flagged".format(flagged))
return result
def remove_toxic(self, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Remove data rows of toxic compounds"""
result = DataSet()
toxic = DataSet()
result.data, toxic.data = remove_toxic(self.data, cutoff=cutoff)
result.print_log("remove toxic", "{:3d} removed".format(toxic.shape[0]))
return result, toxic
def remove_impure(self, strict=False, reset_index=True):
"""Remove entries with `Pure_Flag == "Fail"`"""
result = DataSet()
flagged = DataSet()
        result.data, flagged.data = remove_impure(self.data, strict=strict, reset_index=reset_index)
result.print_log("remove impure", "{:3d} removed".format(flagged.shape[0]))
return result, flagged
def remove_outliers(self, times_dev=3.0, group_by=None, method="median"):
"""Returns the filtered dataframe as well as the outliers.
        method can be `median` or `mean`."""
result = DataSet()
outliers = DataSet()
result.data, outliers.data = remove_outliers(self.data, times_dev=times_dev,
group_by=group_by, method=method)
result.print_log("remove outliers", "{:3d} removed".format(outliers.shape[0]))
return result, outliers
def remove_skipped_echo_direct_transfer(self, fn):
"""Remove wells that were reported as skipped in the Echo protocol (xml).
        This function works with Echo direct transfer protocols.
Function supports using wildcards in the filename, the first file will be used.
Returns a new dataframe without the skipped wells."""
result = DataSet()
result.data, skipped = remove_skipped_echo_direct_transfer(self.data, fn=fn)
skipped_str = "(" + ", ".join(skipped) + ")"
result.print_log("remove skipped", "{:3d} skipped {}".format(self.shape[0] - result.shape[0],
skipped_str))
return result
def drop_dups(self, cpd_id="Compound_Id"):
"""Drop duplicate Compound_Ids"""
result = DataSet()
result.data = self.data.drop_duplicates(cpd_id)
result.print_log("drop dups")
return result
def group_on_well(self, group_by=FINAL_PARAMETERS):
"""Group results on well level."""
result = DataSet()
result.data = group_on_well(self.data, group_by=group_by)
result.print_log("group on well")
return result
def join_batch_data(self, df_data=None, how="left", fillna="n.d."):
"""Join data by Batch_Id."""
result = DataSet()
result.data = join_batch_data(self.data, df_data=df_data, how=how, fillna=fillna)
result.print_log("join batch data")
return result
def join_container_data(self, df_data=None, how="left", fillna=""):
"""Join data by Container_Id."""
result = DataSet()
result.data = join_container_data(self.data, df_data=df_data, how=how, fillna=fillna)
result.print_log("join cntnr data")
return result
def join_container(self, cont_data=None, how="inner"):
result = DataSet(log=self.log)
result.data = join_container(self.data, cont_data=cont_data, how=how)
result.print_log("join container")
return result
def join_smiles(self, df_smiles=None, how="left"):
"""Join Smiles from Compound_Id."""
result = DataSet()
result.data = join_smiles(self.data, df_smiles=df_smiles, how=how)
result.print_log("join smiles")
return result
def join_annotations(self):
"""Join Annotations from Compound_Id."""
result = DataSet()
result.data = join_annotations(self.data)
result.print_log("join annotations")
return result
def add_dmso(self):
"""Add DMSO to references."""
result = DataSet()
result.data = add_dmso(self.data)
result.print_log("add DMSO")
return result
def poc(self, group_by=None, well_type="WellType", control_name="Control"):
"""Normalize the data set to Percent-Of-Control per group (e.g. per plate)
based on the median of the controls.
Parameters:
group_by (string or None): optional column by which the calculation should be grouped,
e.g. the column with plate name."""
result = DataSet()
result.data = poc(self.data, group_by=group_by)
self.print_log("POC")
return result
def activity_profile(self, mad_mult=3.5, parameters=ACT_PROF_PARAMETERS, only_final=True):
"""Generates the `Act_Profile` column.
The byte is set when the parameter's value is greater (or smaller)
        than parameter_ctrl.median() + (or -) `mad_mult` * parameter.mad().
If a list of parameters is given, then the activity profile will be calculated
for these parameters.
If `only_final` == `True`, then only the parameters listed in `FINAL_PARAMETERS`
        are kept in the output table.
Returns a new Pandas DataFrame."""
result = DataSet()
result.data = activity_profile(self.data, mad_mult=mad_mult, parameters=parameters,
only_final=only_final)
result.print_log("activity profile")
return result
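    # Worked example of the activity_profile() threshold rule for one parameter
    # (numbers are made up): with mad_mult = 3.5, a control median of 10 and a
    # parameter MAD of 2, values above 10 + 3.5 * 2 = 17 or below 10 - 3.5 * 2 = 3
    # are marked as active in `Act_Profile`; values in between count as unchanged.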
def relevant_parameters(self, ctrls_std_rel_min=0.001,
ctrls_std_rel_max=0.10):
result = DataSet()
result.data = relevant_parameters(self.data, ctrls_std_rel_min=ctrls_std_rel_min,
ctrls_std_rel_max=ctrls_std_rel_max)
num_parm = len(result.measurements)
result.print_log("relevant parameters", "{:.3f}/{:.3f}/{:4d}"
.format(ctrls_std_rel_min, ctrls_std_rel_max, num_parm))
return result
def correlation_filter(self, cutoff=0.9, method="pearson"):
"""The correlation removes all highly correlated columns from the dataframe.
The function was implemented according to the description of the corresponding
KNIME component.
Parameters:
cutoff (float): correlation cutoff
method (string): "pearson", "kendall", "spearman" (very slow)
Returns a new DataFrame with only the non-correlated columns"""
result = DataSet()
result.data, iterations = correlation_filter(self.data, cutoff=cutoff, method=method)
num_parm = len(result.measurements)
result.print_log("correl. filter (mad)", "{:3d} iterations/{:4d}"
.format(iterations, num_parm))
return result
def correlation_filter_std(self, cutoff=0.9, method="pearson"):
"""The correlation removes all highly correlated columns from the dataframe.
The function was implemented according to the description of the corresponding
KNIME component.
Parameters:
cutoff (float): correlation cutoff
method (string): "pearson", "kendall", "spearman" (very slow)
Returns a new DataFrame with only the non-correlated columns"""
result = DataSet()
result.data, iterations = correlation_filter_std(self.data, cutoff=cutoff, method=method)
num_parm = len(result.measurements)
result.print_log("correl. filter (std)", "{:3d} iterations/{:4d}"
.format(iterations, num_parm))
return result
def add_act_profile_for_control(self, parameters=ACT_PROF_PARAMETERS):
# Compound_Id DMSO: 245754
control = {"Compound_Id": 245754, "Trivial_Name": "Control", "Activity": 0,
"Act_Profile": "".join(["1"] * len(parameters))}
        # iterate over a copy of the keys so popping does not change the dict during iteration
        for k in list(control.keys()):
            if k not in self.data.keys():
                control.pop(k)
        tmp = pd.DataFrame([control])  # one-row frame built from the scalar dict
        result = DataSet()
        result.data = pd.concat([self.data, tmp])
return result
def update_similar_refs(self, mode="cpd", write=True):
"""Find similar compounds in references and update the export file.
The export file of the dict object is in pkl format. In addition,
a tsv file (or maybe JSON?) is written for use in PPilot.
        This method does not return anything, it just writes the result to file."""
rem = "" if write else "write is off"
update_similar_refs(self.data, mode=mode, write=write)
self.print_log("update similar", rem)
def update_datastore(self, mode="cpd", write=True):
"""Update the DataStore with the current DataFrame."""
update_datastore(self.data, mode=mode, write=write)
def find_similar(self, act_profile, cutoff=0.5, max_num=5):
"""Filter the dataframe for activity profiles similar to the given one.
`cutoff` gives the similarity threshold, default is 0.5."""
result = DataSet()
result.data = find_similar(self.data, act_profile=act_profile, cutoff=cutoff, max_num=max_num)
result.print_log("find similar")
return result
def well_id_similarity(self, well_id1, well_id2):
"""Calculate the similarity of the activity profiles from two compounds
        (identified by `Compound_Id`). Returns a value between 0 and 1."""
return well_id_similarity(self.data, well_id1, self.data, well_id2)
def count_active_parameters_occurrences(self, act_prof="Act_Profile",
parameters=ACT_PROF_PARAMETERS):
"""Counts the number of times each parameter has been active in the dataset."""
return count_active_parameters_occurrences(self.data, act_prof=act_prof,
                                                   parameters=parameters)
@property
def shape(self):
return self.data.shape
@property
def metadata(self):
"""Returns a list of the those parameters in the DataFrame that are NOT CellProfiler measurements."""
return metadata(self.data)
@property
def measurements(self):
"""Returns a list of the CellProfiler parameters that are in the DataFrame."""
return measurements(self.data)
def load(fn, sep="\t"):
"""Read one or multiple result files and concatenate them into one dataset.
`fn` is a single filename (string) or a list of filenames."""
result = DataSet()
if isinstance(fn, list):
result.data = pd.concat((pd.read_csv(f, sep=sep) for f in fn))
else:
result.data = pd.read_csv(fn, sep=sep)
drop = [d for d in DROP_GLOBAL if d in result.data.keys()]
result.data.drop(drop, axis=1, inplace=True)
result.print_log("load dataset")
return result
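# Minimal usage sketch for load(); the file names are hypothetical and only illustrate
# the single-file vs. list-of-files call styles. Wrapped in a function so that
# importing this module does not try to read them.
def _example_load_usage():
    ds_single = load("plate_1.tsv")                    # one result file
    ds_merged = load(["plate_1.tsv", "plate_2.tsv"])   # several files concatenated into one DataSet
    return ds_single, ds_merged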
def load_pkl(fn):
result = DataSet()
result.data = pd.read_pickle(fn)
result.print_log("load pickle")
return result
def print_log(df, component, add_info=""):
component = component + ":"
if len(add_info) > 0:
add_info = " ({})".format(add_info)
print("* {:22s} ({:5d} | {:4d}){}".format(component, df.shape[0], df.shape[1], add_info))
def read_smiles_file(fn, props=['Compound_Id', "Smiles"]):
"""Read in the file with the Compound_Ids and the Smiles.
Return a DataFrame for fast access."""
result = pd.read_csv(fn, sep="\t")
result = result[props]
result = result.apply(pd.to_numeric, errors='ignore')
return result
def clear_resources():
try:
del SMILES
print("* deleted resource: SMILES")
except NameError:
pass
try:
del ANNOTATIONS
print("* deleted resource: ANNOTATIONS")
except NameError:
pass
try:
del REFERENCES
print("* deleted resource: REFERENCES")
except NameError:
pass
try:
del SIM_REFS
print("* deleted resource: SIM_REFS")
except NameError:
pass
try:
del DATASTORE
print("* deleted resource: DATASTORE")
except NameError:
pass
try:
del LAYOUTS
print("* deleted resource: LAYOUTS")
except NameError:
pass
def load_resource(resource, mode="cpd"):
"""Available resources: SMILES, ANNOTATIONS, SIM_REFS, REFERENCES,
CONTAINER, CONTAINER_DATA, BATCH_DATA, DATASTORE, LAYOUTS"""
res = resource.lower()
glbls = globals()
if "smi" in res:
if "SMILES" not in glbls:
global SMILES
print("- loading resource: (SMILES)")
SMILES = read_smiles_file(cprp.smiles_path,
props=cprp.smiles_cols)
SMILES = SMILES.apply(pd.to_numeric, errors='ignore')
elif "annot" in res:
if "ANNOTATIONS" not in glbls:
global ANNOTATIONS
print("- loading resource: (ANNOTATIONS)")
ANNOTATIONS = pd.read_csv(cprp.annotations_path, sep="\t")
ANNOTATIONS = ANNOTATIONS.apply(pd.to_numeric, errors='ignore')
elif "sim" in res:
if "SIM_REFS" not in glbls:
global SIM_REFS
print("- loading resource: (SIM_REFS)")
if "ext" in mode.lower():
srp = cprp.sim_refs_ext_path
else:
srp = cprp.sim_refs_path
try:
SIM_REFS = pd.read_csv(srp, sep="\t")
except FileNotFoundError:
print(" * SIM_REFS not found, creating new one.")
SIM_REFS = pd.DataFrame()
elif "ref" in res:
if "REFERENCES" not in glbls:
global REFERENCES
print("- loading resource: (REFERENCES)")
REFERENCES = pd.read_csv(cprp.references_path, sep="\t") # .fillna("")
elif "cont" in res:
if "CONTAINER" not in glbls:
global CONTAINER
print("- loading resource: (CONTAINER)")
CONTAINER = pd.read_csv(cprp.container_path, sep="\t")
            if len(cprp.container_cols) > 0:
CONTAINER = CONTAINER[cprp.container_cols]
CONTAINER = CONTAINER.apply(pd.to_numeric, errors='ignore')
elif "container_d" in res:
if "CONTAINER_DATA" not in glbls:
global CONTAINER_DATA
print("- loading resource: (CONTAINER)")
CONTAINER_DATA = pd.read_csv(cprp.container_data_path, sep="\t")
if len(cprp.container_data_cols) > 0:
CONTAINER_DATA = CONTAINER_DATA[cprp.container_data_cols]
CONTAINER_DATA = CONTAINER_DATA.apply(pd.to_numeric, errors='ignore')
elif "batch_d" in res:
if "BATCH_DATA" not in glbls:
global BATCH_DATA
print("- loading resource: (BATCH_DATA)")
BATCH_DATA = pd.read_csv(cprp.batch_data_path, sep="\t")
if len(cprp.batch_data_cols) > 0:
BATCH_DATA = BATCH_DATA[cprp.batch_data_cols]
BATCH_DATA = BATCH_DATA.apply(pd.to_numeric, errors='ignore')
elif "datast" in res:
if "DATASTORE" not in glbls:
global DATASTORE
print("- loading resource: (DATASTORE)")
try:
DATASTORE = pd.read_csv(cprp.datastore_path, sep="\t")
except FileNotFoundError:
print(" * DATASTORE not found, creating new one.")
DATASTORE = pd.DataFrame()
elif "layout" in res:
if "LAYOUTS" not in glbls:
global LAYOUTS
print("- loading resource: (LAYOUTS)")
LAYOUTS = pd.read_csv(cprp.layouts_path, sep="\t")
else:
raise FileNotFoundError("# unknown resource: {}".format(resource))
def well_type_from_position(df):
"""Assign the WellType from the position on the plate.
    Controls are in columns 11 and 12."""
result = df.copy()
result["WellType"] = "Compound"
result["WellType"][(result["plateColumn"] == 11) | (result["plateColumn"] == 12)] = "Control"
return result
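# Small, self-contained sketch of well_type_from_position(): wells in plate
# columns 11 and 12 are labelled "Control", everything else "Compound".
def _example_well_type_from_position():
    df = pd.DataFrame({"plateRow": [1, 2, 3, 4], "plateColumn": [1, 11, 12, 23]})
    return well_type_from_position(df)  # WellType: Compound, Control, Control, Compound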
def drop_cols(df, cols, inplace=False):
"""Drops the list of columns from the DataFrame.
Listed columns that are not present in the DataFrame are simply ignored
(no error is thrown)."""
df_keys = df.keys()
drop = [k for k in cols if k in df_keys]
if inplace:
df.drop(drop, axis=1, inplace=True)
else:
result = df.drop(drop, axis=1)
return result
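# Sketch for drop_cols(): columns that are not present are silently ignored.
def _example_drop_cols():
    df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    return drop_cols(df, ["B", "NotThere"])  # returns a frame with column "A" only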
def well_from_position(df, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Assign Metadata_Well from plateRow, plateColumn"""
def _well_from_position_series(s):
return cpt.well_from_position_single(s[0], s[1])
result = df.copy()
result[well_name] = result[[row_name, col_name]].apply(_well_from_position_series, axis=1)
return result
def position_from_well(df, well_name="Metadata_Well",
row_name="plateRow", col_name="plateColumn"):
"""Generate plateRow and plateColumn from Metatadata_Well"""
def _position_from_well_series(well):
return (pd.Series(cpt.position_from_well_single(well)))
result = df.copy()
result[[row_name, col_name]] = result[well_name].apply(_position_from_well_series)
return result
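# Round-trip sketch for well_from_position() / position_from_well(); this assumes
# the `cpt` helpers imported by this module are available at runtime.
def _example_well_position_roundtrip():
    df = pd.DataFrame({"plateRow": [8], "plateColumn": [11]})
    df = well_from_position(df)     # adds Metadata_Well derived from row 8 / column 11
    return position_from_well(df)   # regenerates plateRow / plateColumn from the well name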
def join_layout_384(df, layout_fn, on="Address"):
result = df.copy()
result[on] = result["Metadata_Well"]
layout = pd.read_csv(layout_fn)
result = result.merge(layout, on=on)
result.drop(on, axis=1, inplace=True)
result = result.apply(pd.to_numeric, errors='ignore')
return result
def get_batch_from_container(df):
result = df.copy()
result["Batch_Id"] = result["Container_Id"].str[:9]
return result
def get_cpd_from_container(df):
result = pd.concat([df, df["Container_Id"].str.split(":", expand=True)], axis=1)
result.rename(columns={0: "Compound_Id"}, inplace=True)
drop_cols(result, [1, 2, 3, 4], inplace=True)
return result
def join_layout_1536(df, plate, quadrant, on="Address_384", sep="\t", how="inner"):
"""Cell Painting is always run in 384er plates.
COMAS standard screening plates are format 1536.
With this function, the 1536-to-384 reformatting file
can be used directly to join the layout to the individual 384er plates."""
load_resource("LAYOUTS")
layout = LAYOUTS.copy()
if not isinstance(quadrant, str):
quadrant = str(quadrant)
drop = ["Plate_name_384", "Plate_name_1536", "Address_1536", "Index", 1, 2]
result = df.copy()
layout[on] = layout["Plate_name_384"] + layout[on]
if "Container_ID_1536" in layout.keys():
layout.rename(columns={"Container_ID_1536": "Container_Id"}, inplace=True)
if "Conc" in layout.keys():
layout.rename(columns={"Conc": "Conc_uM"}, inplace=True)
layout = join_container(layout)
drop_cols(layout, drop, inplace=True)
result[on] = plate + "." + quadrant[-1:] + result["Metadata_Well"]
result = result.merge(layout, on=on, how=how)
result.drop(on, axis=1, inplace=True)
result["Well_Id"] = result["Container_Id"] + "_" + result["Metadata_Well"]
result = result.apply(pd.to_numeric, errors='ignore')
return result
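# Sketch of how the data-side join key is built in join_layout_1536(): plate name,
# the last character of the quadrant and the 384-well position are concatenated.
# All values below are made up.
def _example_address_384_key():
    plate, quadrant, well = "S0195-1", "4", "H11"
    return plate + "." + quadrant[-1:] + well   # -> "S0195-1.4H11"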
def write_datastore():
df = DATASTORE[cprp.datastore_cols]
df = df.sort_values("Well_Id")
df.to_csv(cprp.datastore_path, index=False, sep="\t")
print_log(df, "write datastore")
def update_datastore(df2, on="Well_Id", mode="cpd", write=False):
global DATASTORE
load_resource("DATASTORE")
df1 = DATASTORE
df2 = df2.copy()
if "ref" in mode:
df2["Is_Ref"] = True
else:
df2["Is_Ref"] = False
df2 = df2[cprp.datastore_cols]
df1 = df1.append(df2, ignore_index=True)
rem = "" if write else "write is off"
print_log(df2, "update datastore", rem)
DATASTORE = df1.drop_duplicates(subset=on, keep="last")
if write:
write_datastore()
def join_batch_data(df, df_data=None, how="Left", fillna="n.d."):
"""Join data from Batch_Id."""
if df_data is None:
load_resource("BATCH_DATA")
df_data = BATCH_DATA
if "Batch_Id" not in df.keys():
df = get_batch_from_container(df)
result = df.merge(df_data, on="Batch_Id", how=how)
result = result.apply(pd.to_numeric, errors='ignore')
result = result.fillna(fillna)
return result
def join_container_data(df, df_data=None, how="Left", fillna=""):
"""Join data from Container_Id."""
if df_data is None:
load_resource("CONTAINER_DATA")
df_data = CONTAINER_DATA
result = df.merge(df_data, on="Container_Id", how=how)
result = result.apply(pd.to_numeric, errors='ignore')
result = result.fillna(fillna)
return result
def join_container(df, cont_data=None, how="inner"):
if cont_data is None:
load_resource("CONTAINER")
cont_data = CONTAINER[["Container_Id", "Compound_Id"]]
result = df.merge(cont_data, on="Container_Id", how=how)
return result
def join_smiles(df, df_smiles=None, how="left"):
"""Join Smiles from Compound_Id."""
if df_smiles is None:
load_resource("SMILES")
df_smiles = SMILES
result = df.merge(df_smiles, on="Compound_Id", how=how)
result = result.apply(pd.to_numeric, errors='ignore')
result = result.fillna("*")
return result
def join_annotations(df):
"""Join Annotations from Compound_Id."""
load_resource("ANNOTATIONS")
annotations = ANNOTATIONS
drop_cols(df, ["Trivial_Name", "Known_Act"], inplace=True)
result = df.merge(annotations, on="Compound_Id", how="left")
result = result.fillna("")
return result
def add_dmso(df):
if df[df["Compound_Id"] == 245754].shape[0] > 0:
# DMSO already present
result = df.copy()
else:
d = {
"Compound_Id": [245754], "Container_Id": ["245754:01:01"], "Well_Id": ["245754:01:01_H11"],
"Producer": ["DMSO"], "Conc_uM": [10], "Activity": [0.0], "Rel_Cell_Count": [100],
"Pure_Flag": ["Ok"], "Toxic": [False], "Trivial_Name": ["DMSO"], "Known_Act": ["Control"],
"Metadata_Well": ["H11"], "Plate": ["170523-S0195-1"], "Smiles": ["CS(C)=O"],
"Act_Profile": [len(ACT_PROF_PARAMETERS) * "1"]
}
dmso = pd.DataFrame(d)
result = pd.concat([df, dmso])
return result
def metadata(df):
"""Returns a list of the those parameters in the DataFrame that are NOT CellProfiler measurements."""
parameters = [k for k in df.keys()
if not (k.startswith("Count_") or k.startswith("Median_"))]
return parameters
def measurements(df):
"""Returns a list of the CellProfiler parameters that are in the DataFrame."""
parameters = [k for k in df.select_dtypes(include=[np.number]).keys()
if k.startswith("Count_") or k.startswith("Median_")]
return parameters
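# Tiny sketch of the metadata()/measurements() split: only numeric "Count_*" and
# "Median_*" columns count as CellProfiler measurements, everything else is metadata.
def _example_metadata_measurements_split():
    df = pd.DataFrame({"Compound_Id": [1], "Count_Cells": [1500], "Median_Intensity": [0.4]})
    return metadata(df), measurements(df)  # (["Compound_Id"], ["Count_Cells", "Median_Intensity"])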
def numeric_parameters(df):
result = df.copy()[measurements(df)]
return result
def flag_toxic(df, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Flag data rows of toxic compounds"""
result = df.copy()
median_cell_count_controls = df[df["WellType"] == "Control"]["Count_Cells"].median()
result["Toxic"] = (result["Count_Cells"] < median_cell_count_controls * cutoff)
result["Rel_Cell_Count"] = (100 * (result["Count_Cells"] / median_cell_count_controls)).astype(int)
return result
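# Self-contained sketch of flag_toxic(): a well is flagged when its cell count falls
# below `cutoff` times the median cell count of the control wells (values made up).
def _example_flag_toxic():
    df = pd.DataFrame({
        "WellType": ["Control", "Control", "Compound", "Compound"],
        "Count_Cells": [1000, 1200, 300, 900],
    })
    return flag_toxic(df, cutoff=0.55)  # only the 300-cell well gets Toxic == True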
def remove_toxic(df, cutoff=LIMIT_CELL_COUNT_L / 100):
"""Remove data rows of toxic compounds"""
if "Toxic" not in df.keys():
flagged = flag_toxic(df, cutoff=cutoff)
else:
flagged = df.copy()
result = flagged[~flagged["Toxic"]]
toxic = flagged[flagged["Toxic"]]
return result, toxic
def remove_skipped_echo_direct_transfer(df, fn):
"""Remove wells that were reported as skipped in the Echo protocol (xml).
    This function works with Echo direct transfer protocols.
Function supports using wildcards in the filename, the first file will be used.
Returns a new dataframe without the skipped wells."""
assert fn.endswith(".xml"), "Echo file expected in XML format."
skipped_wells = []
try:
echo_fn = glob.glob(fn)[0] # use the first glob match
except IndexError:
raise FileNotFoundError("Echo file could not be found")
echo_print = ET.parse(echo_fn).getroot()
skipped = echo_print.find("skippedwells")
for well in skipped.findall("w"):
skipped_wells.append(cpt.format_well(well.get("dn")))
# print("Skipped wells (will be removed):", skipped_wells)
# remove the rows with the skipped wells
# i.e. keep the rows where Metadata_Well is not in the list skipped_wells
result = df[~df["Metadata_Well"].isin(skipped_wells)]
return result, skipped_wells
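# The Echo XML is expected to contain a <skippedwells> element with one <w> child per
# skipped well and the well name in its "dn" attribute. A minimal, made-up example of
# the structure this parser reads (element names outside <skippedwells>/<w> may differ):
#     <report>
#       <skippedwells total="1">
#         <w dn="A3" reason="..." />
#       </skippedwells>
#     </report>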
def remove_impure(df, strict=False, reset_index=True):
"""Remove entries with `Pure_Flag == "Fail"`
    If `strict == True`, compounds with `Pure_Flag == "Warn"` are also removed."""
result = df.copy()
outliers_list = []
try:
outl = result[result["Pure_Flag"] == "Fail"]
except TypeError:
print(result["Pure_Flag"].dtype)
raise
result = result[result["Pure_Flag"] != "Fail"]
outliers_list.append(outl)
if strict:
outl = result[result["Pure_Flag"] == "Warn"]
result = result[result["Pure_Flag"] != "Warn"]
outliers_list.append(outl)
outliers = | pd.concat(outliers_list) | pandas.concat |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
        # Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
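    # The helper above follows the suite's parity pattern: run the operation on the
    # pandas frame first; if pandas raises, assert that Modin raises the same exception
    # type; otherwise compare both results. A minimal sketch of one such check
    # (operation name chosen for illustration):
    #     try:
    #         expected = pandas_df.add(4)
    #     except Exception as e:
    #         with pytest.raises(type(e)):
    #             modin_df.add(4)
    #     else:
    #         df_equals(modin_df.add(4), expected)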
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is unused, but creating it here avoids confusing list-comprehension
        # logic in the pytest.mark.parametrize decorator
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
        # Test when axis is None. This gets repeated, but it is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
        # Test when axis is None. This gets repeated, but it is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
        # pandas exhibits weird behavior for this case.
        # Remove this special case once we can pull the error messages from the backend.
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
            # We have to do this because we choose the highest count slightly
            # differently than pandas. Since there is no guarantee which of the
            # tied values comes first, if the full results don't match, at least
            # make sure that `count`, `unique`, and `freq` agree (the ambiguous
            # `top` row is excluded).
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
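    def test_describe_tied_top(self):
        # Illustrative sketch, not part of the original suite: when two values tie
        # for the most frequent, describe()'s `top` row is ambiguous, so only the
        # unambiguous rows are compared, mirroring the workaround above.
        frame_data = {"col1": ["a", "a", "b", "b"]}
        modin_df = pd.DataFrame(frame_data)
        pandas_df = pandas.DataFrame(frame_data)
        df_equals(
            modin_df.describe().loc[["count", "unique", "freq"]],
            pandas_df.describe().loc[["count", "unique", "freq"]],
        )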
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
        # equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
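        # Passing a list/tuple of axes to dropna relies on older pandas behavior
        # (it was later deprecated and removed upstream), so this mirrors the
        # pandas version this suite is pinned to.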
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is intentionally unused here; it is created only so we avoid
        # putting confusing list-comprehension logic into pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
        # We do not test non-positive limits until pandas-27042 is fixed.
        # We do not test filling over the rows axis until pandas-17399 is fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
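    def test_fillna_series_vs_dict(self):
        # Illustrative sketch, not part of the original suite: a Series passed to
        # fillna is keyed by column label, so it fills the same way as the
        # equivalent {column: value} dict.
        frame_data = {"a": [np.nan, 1.0, 2.0], "b": [1.0, np.nan, 3.0]}
        df = pandas.DataFrame(frame_data)
        modin_df = pd.DataFrame(frame_data)
        fill_dict = {"a": 0.0, "b": 5.0}
        df_equals(modin_df.fillna(fill_dict), df.fillna(fill_dict))
        df_equals(
            modin_df.fillna(pd.Series(fill_dict)), df.fillna(pandas.Series(fill_dict))
        )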
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
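        # `items`, `like`, and `regex` are mutually exclusive in pandas.filter,
        # which is what the TypeError check at the end of this test relies on.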
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
            # Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
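        # Build a non-unique index (three rows per label) to exercise positional
        # and label-based lookups on a duplicated axis.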
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
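        # DataFrame.insert operates in place and returns None in pandas, so the
        # comparisons below check that Modin's insert mirrors that in-place behavior.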
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
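# Helpers for the pipe chain below: h drops one column, g doubles the frame arg1 times via append, f drops the rows labeled arg2 and arg3.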
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
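# Quantiles are only defined for numeric data; non-numeric fixtures should raise ValueError.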
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
pandas_df = pandas.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
modin_renamed = modin_df.rename(columns={"C": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["C"] == 1).all()
def test_rename_inplace(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
df_equals(
modin_df.rename(columns={"C": "foo"}),
test_data.rename(columns={"C": "foo"}),
)
frame = test_data.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"C": "foo"}, inplace=True)
modin_frame.rename(columns={"C": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug(self):
# rename set ref_locs, and set_index was not resetting
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# df = df.set_index(['a', 'b'])
# df.columns = ['2001-01-01']
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# modin_df = modin_df.set_index(['a', 'b'])
# modin_df.columns = ['2001-01-01']
df_equals(modin_df, df)
def test_rename_axis(self):
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(
columns=[str.upper(o) for o in modin_df.columns.names]
),
pandas_df.rename_axis(
columns=[str.upper(o) for o in pandas_df.columns.names]
),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace(self):
test_frame = TestData().frame
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.reorder_levels(["Letter", "Color", "Number"])
def test_replace(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).replace()
def test_resample(self):
d = dict(
{
"price": [10, 11, 9, 13, 14, 18, 17, 19],
"volume": [50, 60, 40, 100, 50, 100, 40, 50],
}
)
df = pd.DataFrame(d)
df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
with pytest.warns(UserWarning):
df.resample("M", on="week_starting")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
def test_rolling(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.rolling(2, win_type="triang")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.round(), pandas_df.round())
df_equals(modin_df.round(1), pandas_df.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)
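# Resolve a string axis name to its integer position before building the weight vectors.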
if isinstance(axis, str):
num_axis = pandas.DataFrame()._get_axis_number(axis)
else:
num_axis = axis
# weights that sum to 1
sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))
weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
# weights that don't sum to 1
weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=0, axis=axis)
pandas_result = pandas_df.sample(n=0, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)
pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=2, random_state=42, axis=axis)
pandas_result = pandas_df.sample(n=2, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
def test_select_dtypes(self):
frame_data = {
"test1": list("abc"),
"test2": np.arange(3, 6).astype("u1"),
"test3": np.arange(8.0, 11.0, dtype="float64"),
"test4": [True, False, True],
"test5": pandas.date_range("now", periods=3).values,
"test6": list(range(5, 8)),
}
df = pandas.DataFrame(frame_data)
rd = pd.DataFrame(frame_data)
include = (np.float, "integer")
exclude = (np.bool_,)
r = rd.select_dtypes(include=include, exclude=exclude)
e = df[["test2", "test3", "test6"]]
df_equals(r, e)
r = rd.select_dtypes(include=np.bool_)
e = df[["test4"]]
df_equals(r, e)
r = rd.select_dtypes(exclude=np.bool_)
e = df[["test1", "test2", "test3", "test5", "test6"]]
df_equals(r, e)
with pytest.raises(ValueError):
    pd.DataFrame().select_dtypes()
def test_sem(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).sem()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
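# Resolve the axis to an integer and build replacement labels of the form '<old label>_<position>'.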
x = pandas.DataFrame()._get_axis_number(axis)
index = modin_df.columns if x else modin_df.index
labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]
modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
df_equals(modin_result, pandas_result)
with pytest.warns(FutureWarning):
modin_df.set_axis(axis, labels, inplace=False)
modin_df_copy = modin_df.copy()
modin_df.set_axis(labels, axis=axis, inplace=True)
# Check that the copy and original are different
with pytest.raises(AssertionError):
    df_equals(modin_df, modin_df_copy)
pandas_df.set_axis(labels, axis=axis, inplace=True)
df_equals(modin_df, pandas_df)
with pytest.warns(FutureWarning):
modin_df.set_axis(labels, axis=axis, inplace=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys)
)
@pytest.mark.parametrize(
"append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(self, request, data, drop, append):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
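# Empty frames have no column to promote to the index, so only non-empty fixtures are exercised.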
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.set_index(
key, drop=drop, append=append, inplace=False
)
pandas_result = pandas_df.set_index(
key, drop=drop, append=append, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_copy = modin_df.copy()
modin_df.set_index(key, drop=drop, append=append, inplace=True)
# Check that the copy and original are different
with pytest.raises(AssertionError):
    df_equals(modin_df, modin_df_copy)
pandas_df.set_index(key, drop=drop, append=append, inplace=True)
df_equals(modin_df, pandas_df)
def test_set_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).set_value(0, 0, 0)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.shape == pandas_df.shape
def test_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.size == pandas_df.size
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_skew(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
def test_slice_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).slice_shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
@pytest.mark.parametrize(
"sort_remaining", bool_arg_values, ids=arg_keys("sort_remaining", bool_arg_keys)
)
def test_sort_index(self, data, axis, ascending, na_position, sort_remaining):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# Change index value so sorting will actually make a difference
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [(i - length / 2) % length for i in range(length)]
pandas_df.index = [(i - length / 2) % length for i in range(length)]
# Add NaNs to sorted index
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [
np.nan if i % 2 == 0 else modin_df.index[i] for i in range(length)
]
pandas_df.index = [
np.nan if i % 2 == 0 else pandas_df.index[i] for i in range(length)
]
else:
length = len(modin_df.columns)
modin_df.columns = [
np.nan if i % 2 == 0 else modin_df.columns[i] for i in range(length)
]
pandas_df.columns = [
np.nan if i % 2 == 0 else pandas_df.columns[i] for i in range(length)
]
modin_result = modin_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
pandas_result = pandas_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
pandas_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
df_equals(modin_df_cp, pandas_df_cp)
# MultiIndex
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pd.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(modin_df))]
)
pandas_df.index = pandas.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(pandas_df))]
)
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(level=0), pandas_df.sort_index(level=0))
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(axis=0), pandas_df.sort_index(axis=0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
def test_sort_values(self, request, data, axis, ascending, na_position):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
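# Skip empty frames; sort over rows for every fixture, but over columns only when the frame is all numeric.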
if "empty_data" not in request.node.name and (
(axis == 0 or axis == "over rows")
or name_contains(request.node.name, numeric_dfs)
):
index = (
modin_df.index if axis == 1 or axis == "columns" else modin_df.columns
)
key = index[0]
modin_result = modin_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
keys = [key, index[-1]]
modin_result = modin_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
def test_squeeze(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
frame_data_2 = {"col1": [0, 1, 2, 3]}
frame_data_3 = {
"col1": [0],
"col2": [4],
"col3": [8],
"col4": [12],
"col5": [0],
}
frame_data_4 = {"col1": [2]}
frame_data_5 = {"col1": ["string"]}
# Different data for different cases
pandas_df = pandas.DataFrame(frame_data).squeeze()
modin_df = pd.DataFrame(frame_data).squeeze()
df_equals(modin_df, pandas_df)
pandas_df_2 = pandas.DataFrame(frame_data_2).squeeze()
modin_df_2 = pd.DataFrame(frame_data_2).squeeze()
df_equals(modin_df_2, pandas_df_2)
pandas_df_3 = pandas.DataFrame(frame_data_3).squeeze()
modin_df_3 = pd.DataFrame(frame_data_3).squeeze()
df_equals(modin_df_3, pandas_df_3)
pandas_df_4 = pandas.DataFrame(frame_data_4).squeeze()
modin_df_4 = pd.DataFrame(frame_data_4).squeeze()
df_equals(modin_df_4, pandas_df_4)
pandas_df_5 = pandas.DataFrame(frame_data_5).squeeze()
modin_df_5 = pd.DataFrame(frame_data_5).squeeze()
df_equals(modin_df_5, pandas_df_5)
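# Selecting a single row from a datetime-indexed frame should also match pandas.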
data = [
[
pd.Timestamp("2019-01-02"),
pd.Timestamp("2019-01-03"),
pd.Timestamp("2019-01-04"),
pd.Timestamp("2019-01-05"),
],
[1, 1, 1, 2],
]
df = pd.DataFrame(data, index=["date", "value"]).T
pf = pandas.DataFrame(data, index=["date", "value"]).T
df.set_index("date", inplace=True)
pf.set_index("date", inplace=True)
df_equals(df.iloc[0], pf.iloc[0])
def test_stack(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).stack()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_std(self, request, data, axis, skipna, numeric_only, ddof):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
def test_style(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).style
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_sum(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.sum(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.sum(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sum_single_column(self, data):
modin_df = pd.DataFrame(data).iloc[:, [0]]
pandas_df = pandas.DataFrame(data).iloc[:, [0]]
df_equals(modin_df.sum(), pandas_df.sum())
df_equals(modin_df.sum(axis=1), pandas_df.sum(axis=1))
def test_swapaxes(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).swapaxes(0, 1)
def test_swaplevel(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.swaplevel("Number", "Color")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_tail(self, data, n):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.tail(n), pandas_df.tail(n))
df_equals(modin_df.tail(len(modin_df)), pandas_df.tail(len(pandas_df)))
def test_take(self):
df = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
with pytest.warns(UserWarning):
df.take([0, 3])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_records(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# Skips nan because only difference is nan instead of NaN
if not name_contains(request.node.name, ["nan"]):
assert np.array_equal(modin_df.to_records(), pandas_df.to_records())
def test_to_sparse(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).to_sparse()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_string(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# Skips nan because only difference is nan instead of NaN
if not name_contains(request.node.name, ["nan"]):
assert modin_df.to_string() == to_pandas(modin_df).to_string()
def test_to_timestamp(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.to_period().to_timestamp()
def test_to_xarray(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).to_xarray()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_transform(self, request, data, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.transform(func)
except Exception as e:
with pytest.raises(type(e)):
modin_df.transform(func)
else:
modin_result = modin_df.transform(func)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_transform_numeric(self, request, data, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.transform(func)
except Exception as e:
with pytest.raises(type(e)):
modin_df.transform(func)
else:
modin_result = modin_df.transform(func)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.T, pandas_df.T)
df_equals(modin_df.transpose(), pandas_df.transpose())
# Uncomment below once #165 is merged
# Test for map across full axis for select indices
# df_equals(modin_df.T.dropna(), pandas_df.T.dropna())
# Test for map across full axis
# df_equals(modin_df.T.nunique(), pandas_df.T.nunique())
# Test for map across blocks
# df_equals(modin_df.T.notna(), pandas_df.T.notna())
def test_truncate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).truncate()
def test_tshift(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.to_period().tshift()
def test_tz_convert(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.tz_localize("America/Los_Angeles").tz_convert("America/Los_Angeles")
def test_tz_localize(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.tz_localize("America/Los_Angeles")
def test_unstack(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).unstack()
def test_update(self):
df = pd.DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = pd.DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = pd.DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
df_equals(df, expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_values(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
np.testing.assert_equal(modin_df.values, pandas_df.values)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_var(self, request, data, axis, skipna, numeric_only, ddof):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception:
with pytest.raises(TypeError):
modin_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
def test_where(self):
frame_data = random_state.randn(100, 10)
pandas_df = pandas.DataFrame(frame_data, columns=list("abcdefghij"))
modin_df = pd.DataFrame(frame_data, columns=list("abcdefghij"))
pandas_cond_df = pandas_df % 5 < 2
modin_cond_df = modin_df % 5 < 2
pandas_result = pandas_df.where(pandas_cond_df, -pandas_df)
modin_result = modin_df.where(modin_cond_df, -modin_df)
assert all((to_pandas(modin_result) == pandas_result).all())
other = pandas_df.loc[3]
pandas_result = pandas_df.where(pandas_cond_df, other, axis=1)
modin_result = modin_df.where(modin_cond_df, other, axis=1)
assert all((to_pandas(modin_result) == pandas_result).all())
other = pandas_df["e"]
pandas_result = pandas_df.where(pandas_cond_df, other, axis=0)
modin_result = modin_df.where(modin_cond_df, other, axis=0)
assert all((to_pandas(modin_result) == pandas_result).all())
pandas_result = pandas_df.where(pandas_df < 2, True)
modin_result = modin_df.where(modin_df < 2, True)
assert all((to_pandas(modin_result) == pandas_result).all())
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
df = pd.DataFrame(data=d)
df = df.set_index(["class", "animal", "locomotion"])
with pytest.warns(UserWarning):
df.xs("mammal")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getitem__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key = modin_df.columns[0]
modin_col = modin_df.__getitem__(key)
assert isinstance(modin_col, pd.Series)
pd_col = pandas_df[key]
df_equals(pd_col, modin_col)
slices = [
(None, -1),
(-1, None),
(1, 2),
(1, None),
(None, 1),
(1, -1),
(-3, -1),
(1, -1, 2),
]
# slice test
for slice_param in slices:
s = slice(*slice_param)
df_equals(modin_df[s], pandas_df[s])
# Test empty
df_equals(pd.DataFrame([])[:10], pandas.DataFrame([])[:10])
def test_getitem_empty_mask(self):
# modin-project/modin#517
modin_frames = []
pandas_frames = []
data1 = np.random.randint(0, 100, size=(100, 4))
mdf1 = pd.DataFrame(data1, columns=list("ABCD"))
pdf1 = pandas.DataFrame(data1, columns=list("ABCD"))
modin_frames.append(mdf1)
pandas_frames.append(pdf1)
data2 = np.random.randint(0, 100, size=(100, 4))
mdf2 = pd.DataFrame(data2, columns=list("ABCD"))
pdf2 = pandas.DataFrame(data2, columns=list("ABCD"))
modin_frames.append(mdf2)
pandas_frames.append(pdf2)
data3 = np.random.randint(0, 100, size=(100, 4))
mdf3 = pd.DataFrame(data3, columns=list("ABCD"))
pdf3 = pandas.DataFrame(data3, columns=list("ABCD"))
modin_frames.append(mdf3)
pandas_frames.append(pdf3)
modin_data = pd.concat(modin_frames)
pandas_data = pandas.concat(pandas_frames)
df_equals(
modin_data[[False for _ in modin_data.index]],
pandas_data[[False for _ in modin_data.index]],
)
def test_getitem_datetime_slice(self):
data = {"data": range(1000)}
index = pd.date_range("2017/1/4", periods=1000)
modin_df = pd.DataFrame(data=data, index=index)
pandas_df = pandas.DataFrame(data=data, index=index)
s = slice("2017-01-06", "2017-01-09")
df_equals(modin_df[s], pandas_df[s])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getattr__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
col = modin_df.__getattr__(key)
col = modin_df.__getattr__("col1")
assert isinstance(col, pd.Series)
col = getattr(modin_df, "col1")
assert isinstance(col, pd.Series)
# Check that lookup in column doesn't override other attributes
df2 = modin_df.rename(index=str, columns={key: "columns"})
assert isinstance(df2.columns, pandas.Index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___setitem__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.__setitem__(modin_df.columns[-1], 1)
pandas_df.__setitem__(pandas_df.columns[-1], 1)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df[modin_df.columns[-1]] = pd.DataFrame(modin_df[modin_df.columns[0]])
pandas_df[pandas_df.columns[-1]] = pandas.DataFrame(
pandas_df[pandas_df.columns[0]]
)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
rows = len(modin_df)
arr = np.arange(rows * 2).reshape(-1, 2)
modin_df[modin_df.columns[-1]] = arr
pandas_df[pandas_df.columns[-1]] = arr
df_equals(pandas_df, modin_df)
with pytest.raises(ValueError, match=r"Wrong number of items passed"):
modin_df["___NON EXISTENT COLUMN"] = arr
modin_df[modin_df.columns[0]] = np.arange(len(modin_df))
pandas_df[pandas_df.columns[0]] = np.arange(len(pandas_df))
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(columns=modin_df.columns)
pandas_df = pandas.DataFrame(columns=pandas_df.columns)
for col in modin_df.columns:
modin_df[col] = np.arange(1000)
for col in pandas_df.columns:
pandas_df[col] = np.arange(1000)
df_equals(modin_df, pandas_df)
# Test series assignment to column
modin_df = pd.DataFrame(columns=modin_df.columns)
pandas_df = pandas.DataFrame(columns=pandas_df.columns)
modin_df[modin_df.columns[-1]] = modin_df[modin_df.columns[0]]
pandas_df[pandas_df.columns[-1]] = pandas_df[pandas_df.columns[0]]
df_equals(modin_df, pandas_df)
# Transpose test
modin_df = pd.DataFrame(data).T
pandas_df = pandas.DataFrame(data).T
# We default to pandas on non-string column names
if not all(isinstance(c, str) for c in modin_df.columns):
with pytest.warns(UserWarning):
modin_df[modin_df.columns[0]] = 0
else:
modin_df[modin_df.columns[0]] = 0
pandas_df[pandas_df.columns[0]] = 0
df_equals(modin_df, pandas_df)
modin_df.columns = [str(i) for i in modin_df.columns]
pandas_df.columns = [str(i) for i in pandas_df.columns]
modin_df[modin_df.columns[0]] = 0
pandas_df[pandas_df.columns[0]] = 0
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___len__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert len(modin_df) == len(pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___neg__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.__neg__()
except Exception as e:
with pytest.raises(type(e)):
modin_df.__neg__()
else:
modin_result = modin_df.__neg__()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___invert__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = ~pandas_df
except Exception as e:
with pytest.raises(type(e)):
repr(~modin_df)
else:
modin_result = ~modin_df
df_equals(modin_result, pandas_result)
def test___hash__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
try:
pd.DataFrame(data).__hash__()
except TypeError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___iter__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterator = modin_df.__iter__()
# Check that modin_iterator implements the iterator interface
assert hasattr(modin_iterator, "__iter__")
assert hasattr(modin_iterator, "next") or hasattr(modin_iterator, "__next__")
pd_iterator = pandas_df.__iter__()
assert list(modin_iterator) == list(pd_iterator)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___contains__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
result = False
key = "Not Exist"
assert result == modin_df.__contains__(key)
assert result == (key in modin_df)
if "empty_data" not in request.node.name:
result = True
key = pandas_df.columns[0]
assert result == modin_df.__contains__(key)
assert result == (key in modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___nonzero__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
# Always raises ValueError
modin_df.__nonzero__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___abs__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = abs(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
abs(modin_df)
else:
modin_result = abs(modin_df)
df_equals(modin_result, pandas_result)
def test___round__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).__round__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___array__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert_array_equal(modin_df.__array__(), pandas_df.__array__())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___bool__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.__bool__()
except Exception as e:
with pytest.raises(type(e)):
modin_df.__bool__()
else:
modin_result = modin_df.__bool__()
df_equals(modin_result, pandas_result)
def test___getstate__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).__getstate__()
def test___setstate__(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
try:
pd.DataFrame(data).__setstate__(None)
except TypeError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___delitem__(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = pandas_df.columns[0]
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
modin_df.__delitem__(key)
pandas_df.__delitem__(key)
df_equals(modin_df, pandas_df)
# Issue 2027
last_label = pandas_df.iloc[:, -1].name
modin_df.__delitem__(last_label)
pandas_df.__delitem__(last_label)
df_equals(modin_df, pandas_df)
def test__options_display(self):
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 102))
pandas_df = | pandas.DataFrame(frame_data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Discriminator Model Performance
# This notebook analyzes the discriminator model's performance. Once the generative model labels our data, the discriminator model takes those labels and improves on its predictions. For this notebook we use a generative model trained on Compound-treats-Disease label functions to predict Compound-treats-Disease sentences. Performance for each model is reported as the area under the receiver operating characteristic curve (AUROC) and the area under the precision-recall curve (AUPR).
# In[1]:
import glob
import os
import pandas as pd
import plotnine as p9
import scipy.stats as ss
from sklearn.metrics import auc, precision_recall_curve, roc_curve, precision_recall_fscore_support
# # Tune Set
# ## Performance of Disc model vs Gen model for each Label Sample
# In[2]:
dev_labels = pd.read_csv("input/ctd_dev_labels.tsv", sep="\t")
dev_labels.head()
# In[3]:
candidate_df = (
pd.read_excel("../data/sentences/sentence_labels_dev.xlsx")
.sort_values("candidate_id")
.query("curated_ctd.notnull()")
)
candidate_df.head()
# In[4]:
gen_model_results_dev_df = pd.read_csv(
"../label_sampling_experiment/results/CtD/results/dev_sampled_results.tsv",
sep="\t"
)
# In[5]:
disc_model_dict = {}
for value in gen_model_results_dev_df.lf_num.unique():
disc_model_dict[value] = (
pd.read_csv(f"input/disc_model_run/{value}/tune.tsv", sep="\t")
)
# In[6]:
def get_au_performance(predictions, gold_labels):
fpr, tpr, _ = roc_curve(
gold_labels,
predictions
)
precision, recall, _ = precision_recall_curve(
gold_labels,
predictions
)
return auc(fpr, tpr), auc(recall, precision)
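# Illustrative usage sketch (not from the original notebook): get_au_performance on a
# perfectly separated toy example; the scores and gold labels below are made up.
toy_scores = [0.9, 0.8, 0.3, 0.2]
toy_gold = [1, 1, 0, 0]
toy_auroc, toy_aupr = get_au_performance(toy_scores, toy_gold)
# Both areas are 1.0 here because every positive score outranks every negative one.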
# In[7]:
records = []
for sample in disc_model_dict:
for column in disc_model_dict[sample].drop("candidate_id", axis=1).columns:
aucs = get_au_performance(
disc_model_dict[sample][column],
candidate_df
.query(f"candidate_id in {disc_model_dict[sample].candidate_id.values.tolist()}")
.curated_ctd
.values
)
records.append({
"model": "disc_model",
"lf_num": int(sample),
"auroc": aucs[0],
"aupr": aucs[1]
})
dev_set_df = (
| pd.DataFrame.from_records(records) | pandas.DataFrame.from_records |
import logging
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import pandas as pd
from pydantic import Field
from typing_extensions import Literal
from feaflow.abstracts import FeaflowImmutableModel, Source, SourceConfig
logger = logging.getLogger(__name__)
class PandasDataFrameSourceSupportedFileTypes(str, Enum):
PICKLE = "pickle"
CSV = "csv"
JSON = "json"
PARQUET = "parquet"
ORC = "orc"
class PandasDataFrameSourceFileConfig(FeaflowImmutableModel):
_template_attrs: Tuple[str] = ("path", "args")
type: PandasDataFrameSourceSupportedFileTypes
path: Union[str, Path]
args: Dict[str, Any] = {}
class PandasDataFrameSourceConfig(SourceConfig):
_template_attrs: Tuple[str] = ("file",)
type: Literal["pandas"] = "pandas"
dict_: Optional[Dict[str, Any]] = Field(alias="dict", default=None)
file: Optional[PandasDataFrameSourceFileConfig] = None
def __init__(self, **data):
assert "dict" in data or "file" in data
super().__init__(**data)
class PandasDataFrameSource(Source):
def __init__(self, config: PandasDataFrameSourceConfig):
logger.info("Constructing PandasDataFrameSource")
logger.debug("With config %s", config)
assert isinstance(config, PandasDataFrameSourceConfig)
super().__init__(config)
def get_dataframe(
self, template_context: Optional[Dict[str, Any]] = None
) -> pd.DataFrame:
config: PandasDataFrameSourceConfig = self.get_config(
template_context=template_context
)
if config.dict_ is not None:
logger.info("Constructing a Pandas DataFrame from a Dict")
return | pd.DataFrame(config.dict_) | pandas.DataFrame |
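# Minimal sketch of the dict-backed path above (example data is made up, not from feaflow):
# for the dict case, get_dataframe simply hands config.dict_ to pandas, i.e. equivalent to
example_dict = {"a": [1, 2], "b": [3, 4]}
example_df = pd.DataFrame(example_dict)  # two columns, two rows, same as the dict source
# Building a full PandasDataFrameSourceConfig(type="pandas", dict=...) is left out here,
# since the SourceConfig base class may carry required fields not shown above.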
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from somecode import stopword
import pretty as pretty
def cooc_plot(data,stop_words=""):
pretty.warnings()
tweet_text = data.text.str.replace('[^A-Za-z0-9@#]+', " ")
words_keyword = pd.Series(' '.join(tweet_text).lower().split())
words_keyword = [line.decode('utf-8').strip() for line in words_keyword]
words_keyword = pd.Series(words_keyword).replace('[^A-Za-z0-9#]+', " ")
## MODULE FOR KEYWORD ANALYSIS
l = []
for word in words_keyword:
if word not in stop_words:
if word not in stopword.stopword():
if len(word) > 2:
l.append(word)
l = | pd.Series(l) | pandas.Series |
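# Hedged Python 3 sketch of the filtering step above (the original code targets Python 2,
# hence the .decode call); the sample text and extra stop words below are made up.
sample_words = pd.Series("rt this is a sample #tweet about pandas".lower().split())
extra_stop_words = {"rt", "this", "is", "a", "about"}
kept_words = [w for w in sample_words if w not in extra_stop_words and len(w) > 2]
# kept_words -> ['sample', '#tweet', 'pandas']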
from copy import deepcopy
import numpy as np
import pandas as pd
from .base import BaseJpegImageDataset
class PairOfAnchorPositivieNegativeDataset(BaseJpegImageDataset):
def __init__(
self,
csv_filename,
input_column,
target_column=None,
input_dir="../data/input",
extension=".jpg",
target_unique_values=None,
num_classes=None,
enable_load=True,
images_dir="",
split="train",
transform=None,
fold_column="Fold",
num_fold=5,
idx_fold=0,
label_smoothing=0,
return_input_as_x=True,
csv_input_dir=None,
# for contrastive learning
num_negatives=1,
**params,
):
super().__init__(
csv_filename=csv_filename,
input_column=input_column,
target_column=target_column,
input_dir=input_dir,
extension=extension,
target_unique_values=target_unique_values,
num_classes=num_classes,
enable_load=enable_load,
images_dir=images_dir,
split=split,
transform=transform,
fold_column=fold_column,
num_fold=num_fold,
idx_fold=idx_fold,
label_smoothing=label_smoothing,
return_input_as_x=return_input_as_x,
csv_input_dir=csv_input_dir,
)
self.num_negatives = num_negatives
# Inputs whose target value appears only once are not suitable as
# training data, because no positive sample can be drawn for them.
self.all_inputs = deepcopy(self.inputs)
self.all_targets = deepcopy(self.targets)
self.inputs = | pd.Series(self.inputs) | pandas.Series |
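# Hedged sketch of the comment above (toy frame and column names are illustrative only):
# targets that occur a single time are dropped so every anchor has a possible positive.
toy_frame = pd.DataFrame({"input": ["a", "b", "c", "d"], "target": [0, 0, 1, 2]})
target_counts = toy_frame["target"].value_counts()
trainable = toy_frame[toy_frame["target"].map(target_counts) > 1]  # keeps only the two target-0 rows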
import argparse
import logging
from typing import Any, Dict, List, Optional
import pandas
import annofabcli
import annofabcli.common.cli
from annofabcli import AnnofabApiFacade
from annofabcli.common.cli import AbstractCommandLineInterface, ArgumentParser, build_annofabapi_resource_and_login
from annofabcli.common.utils import isoduration_to_hour
logger = logging.getLogger(__name__)
class LaborTimePerUser(AbstractCommandLineInterface):
"""
Output working time per member.
"""
def get_labor_time_per_user(self, project_id: str) -> List[Dict[str, Any]]:
"""
Create the list of dicts used to output each member's working time to CSV.
Args:
project_id:
Returns:
List of dicts used to output each member's working time to CSV.
"""
account_statistics = self.service.wrapper.get_account_statistics(project_id)
row_list: List[Dict[str, Any]] = []
for stat_by_user in account_statistics:
account_id = stat_by_user["account_id"]
member = self.facade.get_project_member_from_account_id(project_id, account_id)
histories = stat_by_user["histories"]
for stat in histories:
stat["account_id"] = account_id
stat["user_id"] = member["user_id"] if member is not None else None
stat["username"] = member["username"] if member is not None else None
stat["biography"] = member["biography"] if member is not None else None
stat["worktime_hour"] = isoduration_to_hour(stat["worktime"])
row_list.extend(histories)
return row_list
def list_cumulative_labor_time(self, project_id: str) -> None:
super().validate_project(project_id, project_member_roles=None)
account_stat_list = self.get_labor_time_per_user(project_id)
df = | pandas.DataFrame(account_stat_list) | pandas.DataFrame |
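# Illustrative sketch (an assumption about the helper above): isoduration_to_hour turns an
# ISO 8601 duration such as "PT1H30M" into hours; pandas parses the same format, so the
# conversion is equivalent to:
example_hours = pandas.Timedelta("PT1H30M").total_seconds() / 3600  # -> 1.5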
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 2020
@author: <NAME> (@SergioMinuto90)
"""
from pandas.io.json import json_normalize
from abc import ABC, abstractmethod
import socceraction.vaep as vaep
import pandas as pd
import warnings
import os
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
from processing import PassingNetworkBuilder
from utils import read_json
class StatsBombPassingNetwork(PassingNetworkBuilder, ABC):
def __init__(self, args):
self.plot_type = args.plot_type
self.team_name = args.team_name
self.match_id = args.match_id
self.plot_name = None
self.df_events = None
self.plot_title = None
self.names_dict = None
self.plot_legend = None
self.num_minutes = None
self.player_position = None
self.pair_pass_value = None
self.pair_pass_count = None
self.player_pass_value = None
self.player_pass_count = None
def read_data(self):
"""
Read StatsBomb eventing data of the selected 'match_id', generating a pandas DataFrame
with the events and a dictionary of player names and nicknames.
"""
# Player name translation dict
lineups = read_json("data/eventing/lineups/{0}.json".format(self.match_id))
self.names_dict = {player["player_name"]: player["player_nickname"]
for team in lineups for player in team["lineup"]}
# Pandas dataframe containing the events of the match
events = read_json("data/eventing/events/{0}.json".format(self.match_id))
self.df_events = json_normalize(events, sep="_").assign(match_id=self.match_id)
def compute_total_minutes(self):
"""
Compute the maximum number of minutes that are used for the passing network.
The idea is to stop counting at the first substitution or red card, so the team always has exactly 11 players on the pitch.
"""
first_red_card_minute = self.df_events[self.df_events.foul_committed_card_name.isin(["Second Yellow", "Red Card"])].minute.min()
first_substitution_minute = self.df_events[self.df_events.type_name == "Substitution"].minute.min()
max_minute = self.df_events.minute.max()
self.num_minutes = min(first_substitution_minute, first_red_card_minute, max_minute)
def set_text_info(self):
"""
Set the plot's name, title and legend information based on the customization chosen with the command line arguments.
"""
# Name of the .PNG in the plots/ folder
self.plot_name = "statsbomb_match{0}_{1}_{2}".format(self.match_id, self.team_name, self.plot_type)
# Title of the plot
opponent_team = [x for x in self.df_events.team_name.unique() if x != self.team_name][0]
self.plot_title = "{0}'s passing network against {1} (StatsBomb eventing data)".format(self.team_name, opponent_team)
# Information in the legend
color_meaning = "pass value (VAEP)" if self.plot_type == "pass_value" else "number of passes"
self.plot_legend = "Location: pass origin\nSize: number of passes\nColor: {0}".format(color_meaning)
@abstractmethod
def prepare_data(self):
pass
@staticmethod
def _statsbomb_to_point(location, max_width=120, max_height=80):
'''
Convert a point's coordinates from StatsBomb's 120x80 pitch range to the 0-1 range.
'''
return location[0] / max_width, 1-(location[1] / max_height)
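# Worked example (sketch, not part of the original file): the centre spot of the 120x80
# StatsBomb pitch maps to the middle of the unit square, with the y axis flipped.
assert StatsBombPassingNetwork._statsbomb_to_point([60, 40]) == (0.5, 0.5)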
class StatsBombBasicPassingNetwork(StatsBombPassingNetwork):
def __init__(self, args):
super(StatsBombBasicPassingNetwork, self).__init__(args)
def prepare_data(self):
"""
Prepares the five pandas DataFrames that 'draw_pass_map' needs.
"""
# We select all successful passes done by the selected team before the minute
# of the first substitution or red card.
df_passes = self.df_events[(self.df_events.type_name == "Pass") &
(self.df_events.pass_outcome_name.isna()) &
(self.df_events.team_name == self.team_name) &
(self.df_events.minute < self.num_minutes)].copy()
# If available, use player's nickname instead of full name to optimize space in plot
df_passes["pass_recipient_name"] = df_passes.pass_recipient_name.apply(lambda x: self.names_dict[x] if self.names_dict[x] else x)
df_passes["player_name"] = df_passes.player_name.apply(lambda x: self.names_dict[x] if self.names_dict[x] else x)
# In this type of plot, both the size and color (i.e. value) mean the same: number of passes
self.player_pass_count = df_passes.groupby("player_name").size().to_frame("num_passes")
self.player_pass_value = df_passes.groupby("player_name").size().to_frame("pass_value")
# 'pair_key' combines the names of the passer and receiver of each pass (sorted alphabetically)
df_passes["pair_key"] = df_passes.apply(lambda x: "_".join(sorted([x["player_name"], x["pass_recipient_name"]])), axis=1)
self.pair_pass_count = df_passes.groupby("pair_key").size().to_frame("num_passes")
self.pair_pass_value = df_passes.groupby("pair_key").size().to_frame("pass_value")
# Average pass origin's coordinates for each player
df_passes["origin_pos_x"] = df_passes.location.apply(lambda x: self._statsbomb_to_point(x)[0])
df_passes["origin_pos_y"] = df_passes.location.apply(lambda x: self._statsbomb_to_point(x)[1])
self.player_position = df_passes.groupby("player_name").agg({"origin_pos_x": "median", "origin_pos_y": "median"})
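# Small sketch of the pair_key convention used above (player names are illustrative):
# sorting passer and receiver alphabetically makes A->B and B->A share one key, so both
# directions accumulate on the same edge of the passing network.
example_pair_key = "_".join(sorted(["Jordi Alba", "Lionel Messi"]))  # 'Jordi Alba_Lionel Messi'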
class StatsBombValuePassingNetwork(StatsBombPassingNetwork):
def __init__(self, args):
super(StatsBombValuePassingNetwork, self).__init__(args)
# This data must be prepared in advance by running the 'predict_vaep.py' script
self.predictions_h5 = os.path.join("data/eventing", "predictions.h5")
spadl_h5 = os.path.join("data/eventing", "spadl-statsbomb.h5")
self.actiontypes = pd.read_hdf(spadl_h5, "actiontypes")
self.bodyparts = pd.read_hdf(spadl_h5, "bodyparts")
self.results = pd.read_hdf(spadl_h5, "results")
self.players = pd.read_hdf(spadl_h5, "players")
self.teams = pd.read_hdf(spadl_h5, "teams")
self.actions = pd.read_hdf(spadl_h5, "actions/game_{0}".format(self.match_id))
def prepare_data(self):
"""
Prepares the five pandas DataFrames that 'draw_pass_map' needs.
"""
# We select all successful passes done by the selected team before the minute
# of the first substitution or red card.
df_passes = self.df_events[(self.df_events.type_name == "Pass") &
(self.df_events.pass_outcome_name.isna()) &
(self.df_events.team_name == self.team_name) &
(self.df_events.minute < self.num_minutes)].copy()
# If available, use player's nickname instead of full name to optimize space in plot
df_passes["pass_recipient_name"] = df_passes.pass_recipient_name.apply(lambda x: self.names_dict[x] if self.names_dict[x] else x)
df_passes["player_name"] = df_passes.player_name.apply(lambda x: self.names_dict[x] if self.names_dict[x] else x)
# Set the VAEP metric to each pass
actions = (
self.actions.merge(self.actiontypes,how="left")
.merge(self.results,how="left")
.merge(self.bodyparts,how="left")
.merge(self.players,how="left")
.merge(self.teams,how="left")
)
preds = pd.read_hdf(self.predictions_h5, "game_{0}".format(self.match_id))
values = vaep.value(actions, preds.scores, preds.concedes)
df_vaep = | pd.concat([actions, preds, values], axis=1) | pandas.concat |
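# Hedged sketch (an assumption, since the rest of this class is truncated above): once each
# pass carries a VAEP value, edge weights are summed values rather than pass counts. Toy data
# below is made up; "vaep_value" is a hypothetical column name.
toy_passes = pd.DataFrame({"pair_key": ["A_B", "A_B", "B_C"], "vaep_value": [0.02, 0.05, 0.01]})
toy_pair_pass_value = toy_passes.groupby("pair_key")["vaep_value"].sum().to_frame("pass_value")
# -> pass_value of roughly 0.07 for A_B and 0.01 for B_C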
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
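# Illustrative note (not an extra test): for the invalid scalars above, assert_invalid_comparison
# checks that equality comparisons come back elementwise False while ordering comparisons raise
# TypeError, e.g. (conceptually)
#   dti = date_range("2000-01-01", periods=3)
#   (dti == "foo").any()   # False
#   dti < "foo"            # raises TypeError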
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[ | Timestamp("2011-01-01") | pandas.Timestamp |
import numpy as np
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
def predictAll(test_X, batch_size=100):
predict_value_list = []
for i in range(0, len(test_X), batch_size):
selected_X = test_X[i: i + batch_size]
predict_value = session.run(predict_Y, {X_holder:selected_X})
predict_value_list.extend(predict_value)
return np.array(predict_value_list)
Y = predictAll(test_X)
y = np.argmax(Y, axis=1)
predict_label_list = labelEncoder.inverse_transform(y)
pd.DataFrame(confusion_matrix(test_label_list, predict_label_list),
columns=labelEncoder.classes_,
index=labelEncoder.classes_ )
def eval_model(y_true, y_pred, labels):
# Compute per-class precision, recall, F1 and support
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred)
# Compute the weighted-average precision, recall, F1 and total support
tot_p = np.average(p, weights=s)
tot_r = np.average(r, weights=s)
tot_f1 = np.average(f1, weights=s)
tot_s = np.sum(s)
res1 = pd.DataFrame({
u'Label': labels,
u'Precision': p,
u'Recall': r,
u'F1': f1,
u'Support': s
})
res2 = pd.DataFrame({
u'Label': ['Overall'],
u'Precision': [tot_p],
u'Recall': [tot_r],
u'F1': [tot_f1],
u'Support': [tot_s]
})
res2.index = [999]
res = | pd.concat([res1, res2]) | pandas.concat |
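# Minimal usage sketch (toy labels; assumes eval_model returns the concatenated frame,
# which is cut off above):
toy_y_true = ["cat", "dog", "cat", "dog", "dog"]
toy_y_pred = ["cat", "dog", "dog", "dog", "dog"]
toy_report = eval_model(toy_y_true, toy_y_pred, labels=["cat", "dog"])
# One row per label plus an aggregate row with the weighted-average precision/recall/F1.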
import unittest
import dolphindb as ddb
import numpy as np
import pandas as pd
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
class TestBasicDataTypes(unittest.TestCase):
@classmethod
def setUp(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
@classmethod
def tearDownClass(cls):
pass
def test_int_scalar(self):
re = self.s.run("100")
self.assertEqual(re, 100)
re = self.s.run("int()")
self.assertIsNone(re)
def test_bool_scalar(self):
re = self.s.run("true")
self.assertEqual(re, True)
re = self.s.run("bool()")
self.assertIsNone(re)
def test_char_scalar(self):
re = self.s.run("'a'")
self.assertEqual(re, 97)
re = self.s.run("char()")
self.assertIsNone(re)
def test_short_scalar(self):
re = self.s.run("112h")
self.assertEqual(re, 112)
re = self.s.run("short()")
self.assertIsNone(re)
def test_long_scalar(self):
re = self.s.run("22l")
self.assertEqual(re, 22)
re = self.s.run("long()")
self.assertIsNone(re)
def test_date_scalar(self):
re = self.s.run("2012.06.12")
self.assertEqual(re, np.datetime64('2012-06-12'))
re = self.s.run("date()")
self.assertIsNone(re)
def test_month_scalar(self):
re = self.s.run("2012.06M")
self.assertEqual(re, np.datetime64('2012-06'))
re = self.s.run("month()")
self.assertIsNone(re)
def test_time_scalar(self):
re = self.s.run("12:30:00.008")
self.assertEqual(re, np.datetime64('1970-01-01T12:30:00.008'))
re = self.s.run("time()")
self.assertIsNone(re)
def test_minute_scalar(self):
re = self.s.run("12:30m")
self.assertEqual(re, np.datetime64('1970-01-01T12:30'))
re = self.s.run("minute()")
self.assertIsNone(re)
def test_second_scalar(self):
re = self.s.run("12:30:10")
self.assertEqual(re, np.datetime64('1970-01-01T12:30:10'))
re = self.s.run("second()")
self.assertIsNone(re)
def test_datetime_scalar(self):
re = self.s.run('2012.06.13 13:30:10')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10'))
re = self.s.run("datetime()")
self.assertIsNone(re)
def test_timestamp_scalar(self):
re = self.s.run('2012.06.13 13:30:10.008')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10.008'))
re = self.s.run("timestamp()")
self.assertIsNone(re)
def test_nanotime_scalar(self):
re = self.s.run('13:30:10.008007006')
self.assertEqual(re, np.datetime64('1970-01-01T13:30:10.008007006'))
re = self.s.run("nanotime()")
self.assertIsNone(re)
def test_nanotimestamp_scalar(self):
re = self.s.run('2012.06.13 13:30:10.008007006')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10.008007006'))
re = self.s.run("nanotimestamp()")
self.assertIsNone(re)
def test_float_scalar(self):
re = self.s.run('2.1f')
self.assertEqual(round(re), 2)
re = self.s.run("float()")
self.assertIsNone(re)
def test_double_scalar(self):
re = self.s.run('2.1')
self.assertEqual(re, 2.1)
re = self.s.run("double()")
self.assertIsNone(re)
def test_string_scalar(self):
re = self.s.run('"abc"')
self.assertEqual(re, 'abc')
re = self.s.run("string()")
self.assertIsNone(re)
def test_uuid_scalar(self):
re = self.s.run("uuid('5d212a78-cc48-e3b1-4235-b4d91473ee87')")
self.assertEqual(re, '5d212a78-cc48-e3b1-4235-b4d91473ee87')
re = self.s.run("uuid()")
self.assertIsNone(re)
def test_ipaddr_sclar(self):
re = self.s.run("ipaddr('192.168.1.135')")
self.assertEqual(re, '192.168.1.135')
re = self.s.run("ipaddr()")
self.assertIsNone(re)
def test_int128_scalar(self):
re = self.s.run("int128('e1671797c52e15f763380b45e841ec32')")
self.assertEqual(re, 'e1671797c52e15f763380b45e841ec32')
re = self.s.run("int128()")
self.assertIsNone(re)
def test_python_datetime64_dolphindb_date_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('date', ts), np.datetime64('2019-01-01'))
def test_python_datetime64_dolphindb_month_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('month', ts), np.datetime64('2019-01'))
def test_python_datetime64_dolphindb_time_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('time', ts), np.datetime64('1970-01-01T20:01:01.122'))
def test_python_datetime64_dolphindb_minute_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('minute', ts), np.datetime64('1970-01-01T20:01'))
def test_python_datetime64_dolphindb_second_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('second', ts), np.datetime64('1970-01-01T20:01:01'))
def test_python_datetime64_dolphindb_datetime_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('datetime', ts), np.datetime64('2019-01-01T20:01:01'))
def test_python_datetime64_dolphindb_timestamp_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('timestamp', ts), np.datetime64('2019-01-01T20:01:01.122'))
def test_python_datetime64_dolphindb_nanotime_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('nanotime', ts), np.datetime64('1970-01-01T20:01:01.122346100'))
def test_python_datetime64_dolphindb_nanotimestamp_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('nanotimestamp', ts), np.datetime64('2019-01-01T20:01:01.122346100'))
def test_string_vector(self):
re = self.s.run("`IBM`GOOG`YHOO")
self.assertEqual((re == ['IBM', 'GOOG', 'YHOO']).all(), True)
re = self.s.run("['IBM', string(), 'GOOG']")
self.assertEqual((re==['IBM', '', 'GOOG']).all(), True)
re = self.s.run("[string(), string(), string()]")
self.assertEqual((re==['','','']).all(), True)
def test_function_def(self):
re = self.s.run("def f(a,b){return a+b}")
re = self.s.run("f(1, 2)")
self.assertEqual(re, 3)
def test_symbol_vector(self):
re = self.s.run("symbol(`IBM`MSFT`GOOG`BIDU)")
self.assertEqual((re == ['IBM', 'MSFT', 'GOOG', 'BIDU']).all(), True)
re = self.s.run("symbol(['IBM', '', 'GOOG'])")
self.assertEqual((re==['IBM', '', 'GOOG']).all(), True)
re = self.s.run("symbol(['', '', ''])")
self.assertEqual((re==['', '', '']).all(), True)
def test_char_vector(self):
re = self.s.run("['a', 'b', 'c']")
expected = [97, 98, 99]
self.assertEqual((re==expected).all(), True)
re = self.s.run("['a', char(), 'c']")
expected = [97.0, np.nan, 99.0]
assert_array_almost_equal(re, expected)
def test_bool_vector(self):
re = self.s.run("[true, false, true]")
expected = [True, False, True]
assert_array_equal(re, expected)
re = self.s.run("[true, false, bool()]")
assert_array_equal(re[0:2], [True, False])
self.assertTrue(np.isnan(re[2]))
re = self.s.run("[bool(), bool(), bool()]")
self.assertTrue(np.isnan(re[0]))
self.assertTrue(np.isnan(re[1]))
self.assertTrue(np.isnan(re[2]))
def test_int_vector(self):
re = self.s.run("2938 2920 54938 1999 2333")
self.assertEqual((re == [2938, 2920, 54938, 1999, 2333]).all(), True)
re = self.s.run("[2938, int(), 6552]")
expected = [2938.0, np.nan, 6552.0]
assert_array_almost_equal(re, expected, 1)
re = self.s.run("[int(), int(), int()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_short_vector(self):
re = self.s.run("[10h, 11h, 12h]")
expected = [10, 11, 12]
assert_array_equal(re, expected)
re = self.s.run("[10h, short(), 12h]")
expected = [10.0, np.nan, 12.0]
assert_array_almost_equal(re, expected)
re = self.s.run("[short(), short(), short()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_long_vector(self):
re = self.s.run("[10l, 11l, 12l]")
expected = [10, 11, 12]
assert_array_equal(re, expected)
re = self.s.run("[10l, long(), 12l]")
expected = [10.0, np.nan, 12.0]
assert_array_almost_equal(re, expected)
re = self.s.run("[long(), long(), long()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_double_vector(self):
re = self.s.run("rand(10.0,10)")
self.assertEqual(len(re), 10)
re = self.s.run("[12.5, 26.0, double()]")
expected = [12.5, 26.0, np.nan]
assert_array_almost_equal(re, expected)
re = self.s.run("[double(), double(), double()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_float_vector(self):
re = self.s.run("[12.5f, 26.34f, 25.896f]")
expected = [12.5, 26.34, 25.896]
assert_array_almost_equal(re, expected, 3)
re = self.s.run("[12.5f, float(), 25.896f]")
expected = [12.5, np.nan, 25.896]
assert_array_almost_equal(re, expected, 3)
re = self.s.run("[float(), float(), float()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_date_vector(self):
re = self.s.run("2012.10.01 +1..3")
expected = np.array(['2012-10-02','2012-10-03','2012-10-04'], dtype="datetime64")
self.assertEqual((re == expected).all(), True)
re = self.s.run("[2012.06.01, date(), 2012.06.03]")
expected = np.array(['2012-06-01', 'NaT', '2012-06-03'], dtype="datetime64")
assert_array_equal(re, expected)
re = self.s.run("[date(), date(), date()]")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_month_vector(self):
re = self.s.run("[2012.06M, 2012.07M, 2012.08M]")
expected = [np.datetime64('2012-06'), np.datetime64('2012-07'), np.datetime64('2012-08')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06M, month(), 2012.08M]")
expected = [np.datetime64('2012-06'), np.datetime64('NaT'), np.datetime64('2012-08')]
assert_array_equal(re, expected)
re = self.s.run("take(month(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_time_vector(self):
re = self.s.run("[12:30:10.008, 12:30:10.009, 12:30:10.010]")
expected = [np.datetime64('1970-01-01T12:30:10.008'), np.datetime64('1970-01-01T12:30:10.009'), np.datetime64('1970-01-01T12:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("[12:30:10.008, NULL, 12:30:10.010]")
expected = [np.datetime64('1970-01-01T12:30:10.008'), np.datetime64('NaT'), np.datetime64('1970-01-01T12:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("take(time(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_minute_vector(self):
re = self.s.run("[13:30m, 13:34m, 13:35m]")
expected = [np.datetime64('1970-01-01T13:30'), np.datetime64('1970-01-01T13:34'), np.datetime64('1970-01-01T13:35')]
assert_array_equal(re, expected)
re = self.s.run("[13:30m, minute(), 13:35m]")
expected = [np.datetime64('1970-01-01T13:30'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:35')]
assert_array_equal(re, expected)
re = self.s.run("take(minute(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_second_vector(self):
re = self.s.run("[13:30:10, 13:30:11, 13:30:12]")
expected = [np.datetime64('1970-01-01T13:30:10'), np.datetime64('1970-01-01T13:30:11'), np.datetime64('1970-01-01T13:30:12')]
assert_array_equal(re, expected)
re = self.s.run("[13:30:10, second(), 13:30:12]")
expected = [np.datetime64('1970-01-01T13:30:10'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:30:12')]
assert_array_equal(re, expected)
re = self.s.run("take(second(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_datetime_vector(self):
re = self.s.run("2012.10.01T15:00:04 + 2009..2011")
expected = np.array(['2012-10-01T15:33:33', '2012-10-01T15:33:34', '2012-10-01T15:33:35'], dtype="datetime64")
self.assertEqual((re == expected).all(), True)
re = self.s.run("[2012.06.01T12:30:00, datetime(), 2012.06.02T12:30:00]")
expected = np.array(['2012-06-01T12:30:00', 'NaT', '2012-06-02T12:30:00'], dtype="datetime64")
assert_array_equal(re, expected)
def test_timestamp_vector(self):
re = self.s.run("[2012.06.13T13:30:10.008, 2012.06.13T13:30:10.009, 2012.06.13T13:30:10.010]")
expected = [np.datetime64('2012-06-13T13:30:10.008'), np.datetime64('2012-06-13T13:30:10.009'), np.datetime64('2012-06-13T13:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06.13T13:30:10.008, NULL, 2012.06.13T13:30:10.010]")
expected = [np.datetime64('2012-06-13T13:30:10.008'), np.datetime64('NaT'), np.datetime64('2012-06-13T13:30:10.010')]
assert_array_equal(re, expected)
re = self.s.run("take(timestamp(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_nanotime_vector(self):
re = self.s.run("[13:30:10.008007006, 13:30:10.008007007, 13:30:10.008007008]")
expected = [np.datetime64('1970-01-01T13:30:10.008007006'), np.datetime64('1970-01-01T13:30:10.008007007'), np.datetime64('1970-01-01T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("[13:30:10.008007006, NULL, 13:30:10.008007008]")
expected = [np.datetime64('1970-01-01T13:30:10.008007006'), np.datetime64('NaT'), np.datetime64('1970-01-01T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("take(nanotime(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_nanotimestamp_vector(self):
re = self.s.run("[2012.06.13T13:30:10.008007006, 2012.06.13T13:30:10.008007007, 2012.06.13T13:30:10.008007008]")
expected = [np.datetime64('2012-06-13T13:30:10.008007006'), np.datetime64('2012-06-13T13:30:10.008007007'), np.datetime64('2012-06-13T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("[2012.06.13T13:30:10.008007006, NULL, 2012.06.13T13:30:10.008007008]")
expected = [np.datetime64('2012-06-13T13:30:10.008007006'), np.datetime64('NaT'), np.datetime64('2012-06-13T13:30:10.008007008')]
assert_array_equal(re, expected)
re = self.s.run("take(nanotimestamp(), 3)")
expected = [np.datetime64('NaT'), np.datetime64('NaT'), np.datetime64('NaT')]
assert_array_equal(re, expected)
def test_uuid_vector(self):
re = self.s.run("uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89'])")
expected = ['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89']
assert_array_equal(re, expected)
re = self.s.run("uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '', '5d212a78-cc48-e3b1-4235-b4d91473ee89'])")
expected = ['5d212a78-cc48-e3b1-4235-b4d91473ee87', '00000000-0000-0000-0000-000000000000', '5d212a78-cc48-e3b1-4235-b4d91473ee89']
assert_array_equal(re, expected)
re = self.s.run("uuid(['', '', ''])")
expected = ['00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000', '00000000-0000-0000-0000-000000000000']
assert_array_equal(re, expected)
def test_ipaddr_vector(self):
re = self.s.run("ipaddr(['192.168.1.135', '192.168.1.124', '192.168.1.14'])")
expected = ['192.168.1.135', '192.168.1.124', '192.168.1.14']
assert_array_equal(re, expected)
re = self.s.run("ipaddr(['192.168.1.135', '', '192.168.1.14'])")
expected = ['192.168.1.135', '0.0.0.0', '192.168.1.14']
assert_array_equal(re, expected)
re = self.s.run("ipaddr(['', '', ''])")
expected = ['0.0.0.0', '0.0.0.0', '0.0.0.0']
assert_array_equal(re, expected)
def test_int128_vector(self):
re = self.s.run("int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34'])")
expected = ['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34']
assert_array_equal(re, expected)
re = self.s.run("int128(['e1671797c52e15f763380b45e841ec32', '', 'e1671797c52e15f763380b45e841ec34'])")
expected = ['e1671797c52e15f763380b45e841ec32', '00000000000000000000000000000000', 'e1671797c52e15f763380b45e841ec34']
assert_array_equal(re, expected)
re = self.s.run("int128(['', '', ''])")
expected = ['00000000000000000000000000000000', '00000000000000000000000000000000', '00000000000000000000000000000000']
assert_array_equal(re, expected)
def test_int_matrix(self):
re = self.s.run("1..6$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_short_matrix(self):
re = self.s.run("short(1..6)$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_long_matrix(self):
re = self.s.run("long(1..6)$3:2")
expected = np.array([[1, 4], [2, 5], [3, 6]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_double_matrix(self):
re = self.s.run("[1.1, 1.2, 1.3, 1.4, 1.5, 1.6]$3:2")
expected = [[1.1, 1.4], [1.2, 1.5], [1.3, 1.6]]
assert_array_almost_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_float_matrix(self):
re = self.s.run("[1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f]$3:2")
expected = [[1.1, 1.4], [1.2, 1.5], [1.3, 1.6]]
assert_array_almost_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_symbol_matrix(self):
re = self.s.run('symbol("A"+string(1..9))$3:3')
expected = np.array([["A1","A4","A7"], ["A2","A5","A8"], ["A3","A6","A9"]])
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_huge_matrix(self):
re = self.s.run('matrix(loop(take{, 3000}, 1..3000))')
expected = np.arange(1, 3001)
for i in np.arange(0, 3000):
assert_array_equal(re[0][i], expected)
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_one_column_matrix(self):
re = self.s.run('matrix(1..3000000)')
for i in np.arange(0, 3000000):
assert_array_equal(re[0][i], [i+1])
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_one_row_matrix(self):
re = self.s.run("matrix(take(1, 5000)).transpose()")
assert_array_equal(re[0], [np.repeat(1, 5000)])
self.assertIsNone(re[1])
self.assertIsNone(re[2])
def test_zero_column_matrix(self):
re = self.s.run("matrix(INT, 3, 0)")
expected = [[] for i in range(3)]
assert_array_equal(re[0], expected)
re = self.s.run("matrix(BOOL,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,bool )
re = self.s.run("matrix(CHAR,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int8')
re = self.s.run("matrix(SHORT,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int16')
re = self.s.run("matrix(LONG,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'int64')
re = self.s.run("matrix(DATE,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(MONTH,3,0)")
expected = np.empty((3,0),dtype="datetime64[M]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[M]')
re = self.s.run("matrix(TIME,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(MINUTE,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(SECOND,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATETIME,3,0)")
expected = np.empty((3,0),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(TIMESTAMP,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,'datetime64[ns]')
re = self.s.run("matrix(NANOTIME,3,0)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIMESTAMP,3,0)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(FLOAT,3,0)")
expected = np.empty((3,0),dtype="float32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DOUBLE,3,0)")
assert_array_equal(re[0], expected)
self.assertEqual(re[0].dtype,"float64")
re = self.s.run("matrix(SYMBOL,3,0)")
assert_array_equal(re[0], expected)
def test_zero_row_matrix(self):
re = self.s.run("matrix(INT, 0, 3)")
expected = np.empty((0,3),dtype="int32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(BOOL,0,3)")
expected = np.empty((0,3),dtype="bool")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(CHAR,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SHORT,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(LONG,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATE,0,3)")
expected = np.empty((0,3),dtype="datetime64[ns]")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(MONTH,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(TIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(MINUTE,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SECOND,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DATETIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(TIMESTAMP,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIME,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(NANOTIMESTAMP,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(FLOAT,0,3)")
expected = np.empty((0,3),dtype="float32")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(DOUBLE,0,3)")
assert_array_equal(re[0], expected)
re = self.s.run("matrix(SYMBOL,0,3)")
assert_array_equal(re[0], expected)
def test_all_null_matrix(self):
re = self.s.run("take(int(), 12)$3:4")
expected=[[np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN], [np.NaN, np.NaN, np.NaN, np.NaN]]
assert_array_equal(re[0], expected)
re = self.s.run("[1, 2, NULL, 3, NULL, 4]$2:3")
expected=[[1, np.NaN, np.NaN], [2., 3., 4.]]
assert_array_equal(re[0], expected)
re = self.s.run("symbol(take(string(), 12))$3:4")
assert_array_equal(re[0][0], ['','','',''])
assert_array_equal(re[0][1], ['','','',''])
assert_array_equal(re[0][2], ['','','',''])
re = self.s.run("symbol(['AA', 'BB', NULL, 'CC', NULL, 'DD'])$2:3")
assert_array_equal(re[0][0], ['AA','',''])
assert_array_equal(re[0][1], ['BB','CC','DD'])
def test_huge_symbol_matrix(self):
re = self.s.run("m = symbol(string(1..1000000))$200:5000;m.rename!(1..200,1..5000);m")
assert_array_equal(re[1], np.arange(1, 201))
assert_array_equal(re[2], np.arange(1, 5001))
re = self.s.run("m = symbol(string(1..1000000))$200:5000;m.rename!(1..200,1..5000);table(m.rowNames() as label, m)")
assert_array_equal(re["label"], np.arange(1, 201))
j=1
for i in np.arange(1, 5001):
assert_series_equal(re.iloc[:,i], pd.Series([str(x) for x in np.arange(j, j+200)], index=np.arange(0, 200)),check_names=False)
j+=200
def test_int_matrix_with_label(self):
re = self.s.run("cross(add,1..5,1..10)")
expected = np.array(
[[2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
[5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [6, 7, 8, 9, 10, 11, 12, 13, 14, 15]])
#self.assertEqual((re == expected).all(), True)
assert_array_equal(re[0], expected)
assert_array_equal(re[1], np.array([1, 2, 3, 4, 5]))
assert_array_equal(re[2], np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
def test_matrix_only_with_row_label(self):
re = self.s.run("m=1..6$3:2;m.rename!([0, 1, 2],);m")
expected = [[1, 4], [2, 5], [3, 6]]
assert_array_equal(re[0], expected)
assert_array_equal(re[1], [0, 1, 2])
self.assertIsNone(re[2])
def test_matrix_only_with_col_label(self):
re = self.s.run("m=1..6$3:2;m.rename!([0, 1]);m")
expected = [[1, 4], [2, 5], [3, 6]]
assert_array_equal(re[0], expected)
self.assertIsNone(re[1])
assert_array_equal(re[2], [0, 1])
def test_matrix_label_date_symbol(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!(2012.01.01..2012.01.04, symbol(`C`IBM`MS));
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], np.array(['2012-01-01T00:00:00.000000000', '2012-01-02T00:00:00.000000000','2012-01-03T00:00:00.000000000', '2012-01-04T00:00:00.000000000'], dtype="datetime64"))
assert_array_equal(re[2], ['C', 'IBM', 'MS'])
def test_matrix_label_second_symbol(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!([09:30:00, 10:00:00, 10:30:00, 11:00:00], `C`IBM`MS)
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], np.array(['1970-01-01T09:30:00.000000000', '1970-01-01T10:00:00.000000000','1970-01-01T10:30:00.000000000', '1970-01-01T11:00:00.000000000'], dtype="datetime64"))
assert_array_equal(re[2], ['C', 'IBM', 'MS'])
def test_matrix_label_symbol_date(self):
script='''
m=matrix([2200, 1300, 2500, 8800], [6800, 5400, NULL, NULL], [1900, 2100, 3200, NULL]).rename!(`C`IBM`MS`ZZ, 2012.01.01..2012.01.03)
m
'''
re = self.s.run(script)
expected=[[2200., 6800.,1900.],[1300.,5400.,2100.],[2500.,np.NaN,3200.],[8800.,np.NaN, np.NaN]]
assert_array_almost_equal(re[0], expected)
assert_array_equal(re[1], ['C', 'IBM', 'MS', 'ZZ'])
assert_array_equal(re[2], np.array(['2012-01-01T00:00:00.000000000', '2012-01-02T00:00:00.000000000',
'2012-01-03T00:00:00.000000000'],dtype="datetime64"))
def test_table(self):
script = '''n=20;
syms=`IBM`C`MS`MSFT`JPM`ORCL`BIDU`SOHU`GE`EBAY`GOOG`FORD`GS`PEP`USO`GLD`GDX`EEM`FXI`SLV`SINA`BAC`AAPL`PALL`YHOO`KOH`TSLA`CS`CISO`SUN;
mytrades=table(09:30:00+rand(18000,n) as timestamp,rand(syms,n) as sym, 10*(1+rand(100,n)) as qty,5.0+rand(100.0,n) as price);
select qty,price from mytrades where sym==`IBM;'''
re = self.s.run(script)
self.assertEqual(re.shape[1], 2)
def test_dictionary(self):
script = '''dict(1 2 3,`IBM`MSFT`GOOG)'''
re = self.s.run(script)
expected = {2: 'MSFT', 3: 'GOOG', 1: 'IBM'}
self.assertDictEqual(re, expected)
def test_any_vector(self):
re = self.s.run("([1], [2],[1,3, 5],[0.9, 0.8])")
self.assertEqual((re[0] == [1]).all(), True)
self.assertEqual((re[1] == [2]).all(), True)
self.assertEqual((re[2] == [1, 3, 5]).all(), True)
def test_set(self):
re = self.s.run("set(1+3*1..3)")
self.assertSetEqual(re, {10, 4, 7})
def test_pair(self):
re = self.s.run("3:4")
self.assertListEqual(re, list([3, 4]))
def test_any_dictionary(self):
re = self.s.run("{a:1,b:2}")
expected = {'a': 1, 'b': 2}
self.assertDictEqual(re, expected)
def test_upload_matrix(self):
a = self.s.run("cross(+, 1..5, 1..5)")
b = self.s.run("1..25$5:5")
self.s.upload({'a': a, 'b': b})
re = self.s.run('a+b')
# print(re)
# self.assertEqual((re[0] == [3, 9, 15, 21, 27]).all(), True)
# self.assertEqual((re[1] == [5, 11, 17, 23, 29]).all(), True)
# self.assertEqual((re[2] == [7, 13, 19, 25, 31]).all(), True)
# self.assertEqual((re[3] == [9, 15, 21, 27, 33]).all(), True)
# self.assertEqual((re[4] == [11, 17, 23, 29, 35]).all(), True)
def test_run_plot(self):
script = '''
x=1..10
t = table(x as sin, x+100 as cos)
plot(t)
'''
re = self.s.run(script)
assert_array_equal(re['data'][0], np.array([[1, 101], [2, 102], [3, 103], [4, 104], [5, 105], [6, 106], [7, 107], [8, 108], [9, 109], [10, 110]]))
self.assertIsNone(re['data'][1])
assert_array_equal(re['data'][2], np.array(['sin', 'cos']))
assert_array_equal(re['title'], np.array(['', '', '']))
def test_table_datatypes(self):
script='''
n = 200
a = 100
v1 = string(1..n)
v2 = string(1..n)
v3 = take(int128("fcc69bca9885b51962660c23d08c124a"),n-a).join(take(int128("a428d55098d8e41e8adc4b7d04d8ede1"),a))
v4 = take(uuid("407c628e-d319-25c1-17ee-e5a73500a010"),n-a).join(take(uuid("d7a39280-1b18-8f56-160c-beabd428c934"),a))
v5 = take(ipaddr("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"),n-a).join(take(ipaddr("fc00:db20:35b:7399::5"),a))
t = table(n:n,`val1`val2`val3`val4`val5,[SYMBOL,STRING,INT128,UUID,IPADDR])
t[`val1] = v1
t[`val2] = v2
t[`val3] = v3
t[`val4] = v4
t[`val5] = v5
'''
self.s.run(script)
df1 = self.s.run("select val1 from t")
df2 = self.s.run("select val2 from t")
df3 = self.s.run("select val3 from t")
df4 = self.s.run("select val4 from t")
df5 = self.s.run("select val5 from t")
df = self.s.run("select * from t")
n = 200
a = 100
data1 = np.array(range(1,n+1),dtype="str")
data2 = np.append(np.repeat("fcc69bca9885b51962660c23d08c124a",n-a),np.repeat("a428d55098d8e41e8adc4b7d04d8ede1",a))
data3 = np.append(np.repeat("407c628e-d319-25c1-17ee-e5a73500a010",n-a),np.repeat("d7a39280-1b18-8f56-160c-beabd428c934",a))
data4 = np.append(np.repeat("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",n-a),np.repeat("3fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",a))
ex1 = pd.DataFrame({"val1":data1})
ex2 = pd.DataFrame({"val2":data1})
ex3 = pd.DataFrame({"val3":data2})
ex4 = pd.DataFrame({"val4":data3})
ex5 = pd.DataFrame({"val5":data4})
ex = pd.DataFrame({"val1":data1,"val2":data1,"val3":data2,"val4":data3,"val5":data4})
assert_frame_equal(df1, ex1)
assert_frame_equal(df2, ex2)
assert_frame_equal(df3, ex3)
assert_frame_equal(df4, ex4)
assert_frame_equal(df5, ex5)
assert_frame_equal(df, ex)
def test_table_datatypes_with_null(self):
script='''
n = 100
a = 50
v1=string(1..(n-a)).join(take(string(),a))
v2 = v1
v3 = take(int128("fcc69bca9885b51962660c23d08c124a"),n-a).join(take(int128(),a))
v4 = take(uuid("407c628e-d319-25c1-17ee-e5a73500a010"),n-a).join(take(uuid(),a))
v5 = take(ipaddr("fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"),n-a).join(take(ipaddr(),a))
t = table(n:n,`val1`val2`val3`val4`val5,[SYMBOL,STRING,INT128,UUID,IPADDR])
t[`val1] = v1
t[`val2] = v2
t[`val3] = v3
t[`val4] = v4
t[`val5] = v5
'''
self.s.run(script)
df1 = self.s.run("select val1 from t")
df2 = self.s.run("select val2 from t")
df3 = self.s.run("select val3 from t")
df4 = self.s.run("select val4 from t")
df5 = self.s.run("select val5 from t")
df = self.s.run("select * from t")
n = 100
a = 50
arr1 = np.append(np.array(range(1,n-a+1),dtype="str"),np.repeat("",a))
arr2 = np.append(np.repeat("fcc69bca9885b51962660c23d08c124a",n-a),np.repeat("00000000000000000000000000000000",a))
arr3 = np.append(np.repeat("407c628e-d319-25c1-17ee-e5a73500a010",n-a),np.repeat("00000000-0000-0000-0000-000000000000",a))
arr4 = np.append(np.repeat("4fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b",n-a),np.repeat("0.0.0.0",a))
ex1 = pd.DataFrame(arr1,columns=["val1"])
ex2 = pd.DataFrame(arr1,columns=["val2"])
        ex3 = pd.DataFrame(arr2,columns=["val3"])
import sys
from typing import List
import pandas as pd
# Note: must mark "common" as "Sources Root" in PyCharm to have visibility
from common_paths import *
from utilities_cbc import read_excel_or_csv_path, debug_write_raw_text, circle_abbrev_from_path
from text_extractor import TextExtractorFactory
from input_files_context import InputFilesContext
from text_transform import pre_process_line, secondary_species_processing
from local_translation_context import LocalTranslationContext
from taxonomy import Taxonomy
# from nlp_context import NLPContext
from parameters import Parameters
from taxonomy_token_identify import TaxonomyTokenIdentify
from spacy.tokens import Span
from spacy.util import filter_spans
from spacy_extra import filter_to_possibles, \
write_visualization
from write_final_checklist import write_local_checklist_with_group, \
write_final_checklist_spreadsheet
from write_categorized_lines import write_categorized_lines_spreadsheet
from write_basic_spreadsheet import write_basic_spreadsheet
# 🧭 U+1F9ED Compass Emoji (use for ranged)
# 🎂 U+1F382 Birthday Cake (use for Adult/Immature)
sys.path.append('common')
sys.path.append('textextractor')
sys.path.append('taxonomy')
emoji_compass = '\U0001f9ed'
emoji_birthday_cake = '\U0001f382'
def load_rarities_text(rarities_path: Path) -> List[str]:
# Check for a rarities list
rare_species = []
if rarities_path.exists():
with open(rarities_path, 'r') as fp:
lines = fp.read()
rare_species = lines.split('\n')
return rare_species
def process_rarities(checklist: pd.DataFrame, rare_species: List[str]) -> pd.DataFrame:
# Mark rarities
if rare_species:
rare_idxs = checklist.index[checklist['CommonName'].isin(rare_species)]
if len(rare_idxs):
checklist.at[rare_idxs, 'Rare'] = 'X'
return checklist
def process_annotations(checklist: pd.DataFrame, annotations_path: Path) -> pd.DataFrame:
# The set operations below are because not all annotations files will have all columns
annotations = load_annotations(annotations_path)
if not annotations.empty:
# rare_mask = [xs == 'X' for xs in annotations['Rare'].values]
# rare_species = list(annotations[rare_mask].CommonName.values)
# if rare_species:
# rare_idxs = local_checklist.index[local_checklist['CommonName'].isin(rare_species)]
# if len(rare_idxs):
# local_checklist.at[rare_idxs, 'Rare'] = 'X'
emd_cols = {'Easy', 'Marginal', 'Difficult'} & set(annotations.columns)
        if any(any(xs == 'X' for xs in annotations[col].values) for col in emd_cols):
checklist['D'] = ''
annotations_effort = {}
for ix, row in annotations.iterrows():
annotations_effort[row['CommonName']] = row['Difficulty']
difficulty = [annotations_effort.get(cn, '') for cn in checklist.CommonName]
checklist['Difficulty'] = difficulty
# Add new annotation columns to local_checklist
adim_cols = {'Adult', 'Immature', 'W-morph',
'B-Morph', 'CountSpecial'} & set(annotations.columns)
for col in adim_cols:
if any([xs == 'X' for xs in annotations[col].values]):
checklist[col] = ''
checklist['Ad'] = ''
checklist['Im'] = ''
checklist['CountSpecial'] = ''
rare_adim_cols = {'Rare', 'Adult', 'Immature',
'W-morph', 'B-Morph', 'CountSpecial'} & set(annotations.columns)
for col in rare_adim_cols:
mask = [xs == 'X' for xs in annotations[col].values]
related_species = list(annotations[mask].CommonName.values)
if related_species:
species_idxs = checklist.index[
checklist['CommonName'].isin(related_species)]
if len(species_idxs):
checklist.at[species_idxs, col] = 'X'
# Overload the Difficulty field with the ranging field
if 'Ranging' in annotations.columns:
mask = [xs == 'X' for xs in annotations['Ranging'].values]
related_species = list(annotations[mask].CommonName.values)
if related_species:
species_idxs = checklist.index[
checklist['CommonName'].isin(related_species)]
if len(species_idxs):
checklist.at[species_idxs, 'D'] = emoji_compass
return checklist
def process_annotations_or_rarities(checklist: pd.DataFrame,
checklist_path: Path,
circle_prefix: str) -> pd.DataFrame:
"""
Look for Annotations or Rarities files and mark the 'Rare' column in checklist
with an 'X'
    Annotations.xlsx must have a 'CommonName' column plus any of: 'Rare', 'Difficulty',
    'Easy', 'Marginal', 'Difficult', 'Adult', 'Immature', 'W-morph', 'B-Morph',
    'CountSpecial', 'Ranging'
    Rarities.xlsx (or CSV) requires 'CommonName' and 'Rare' columns
Rarities.txt is just a text list of rare species
:param circle_prefix:
:param checklist:
:param checklist_path: full path for checklist. Used to construct names for inputs
:return: checklist with 'Rare' column set to 'X' if species is rare
"""
# Process annotations. The XXXX-LocalAnnotations.xlsx file will be preferred over
# the rarities list if it exists
annotations_dir = checklist_path.parent
annotations_path = annotations_dir / f'{circle_prefix}Annotations.xlsx'
print(f'Annotations path: {annotations_path}')
if annotations_path.exists():
return process_annotations(checklist, annotations_path)
for ext in ['xlsx', 'csv', 'txt']:
rarities_path = annotations_dir / f'{circle_prefix}Rarities.{ext}'
if not rarities_path.exists():
continue
if ext == 'txt':
rare_species = load_rarities_text(rarities_path)
else:
rarities_df = read_excel_or_csv_path(rarities_path)
rare_species = list(rarities_df[rarities_df.Rare == 'X'].CommonName.values)
checklist = process_rarities(checklist, rare_species)
break
return checklist
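# Illustrative sketch only (hypothetical species names, not part of the pipeline):
# how a minimal rarities table flows through process_rarities().
#   checklist = pd.DataFrame({'CommonName': ['Mallard', 'Smew'], 'Rare': ['', '']})
#   checklist = process_rarities(checklist, ['Smew'])
#   # 'Rare' is now 'X' on the 'Smew' row and unchanged elsewhere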
def process_exceptions(candidate_names: List[str], checklist_path: Path,
circle_prefix: str) -> List[str]:
# checklist_path = inputs_parse_path / 'CAPA-checklist.xlsx' # only care about path and prefix
exceptions_dir = checklist_path.parent
exceptions_path = exceptions_dir / f'{circle_prefix}Exceptions.xlsx'
print(f'Exceptions path: {exceptions_path}')
if not exceptions_path.exists():
return candidate_names
print(f'Exceptions: {exceptions_path}')
exceptions_df = read_excel_or_csv_path(exceptions_path)
if exceptions_df.empty:
return candidate_names
mask_add = exceptions_df.Add == 'X'
mask_sub = exceptions_df.Subtract == 'X'
additions = set(exceptions_df[mask_add].CommonName.values)
subtractions = set(exceptions_df[mask_sub].CommonName.values)
addstr = ', '.join(additions)
subst = ', '.join(subtractions)
print(f'Additions: {addstr}\nSubtractions: {subst}')
local_names = list((set(candidate_names) | additions) - subtractions)
return local_names
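# Illustrative sketch only (hypothetical rows): process_exceptions() is set arithmetic,
# (candidate names | species marked Add='X') - species marked Subtract='X'.
#   candidate_names = ['Mallard', 'Smew']
#   # Exceptions rows: ('Gadwall', Add='X'), ('Smew', Subtract='X')
#   # result: ['Mallard', 'Gadwall'] (order not guaranteed, since sets are used)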
def build_full_tally_sheet(double_translated,
fpath: Path,
taxonomy: Taxonomy,
parameters: Parameters,
circle_prefix: str):
candidate_names = [x for x, y in double_translated]
local_names = process_exceptions(candidate_names, fpath, circle_prefix)
# if issf etc in list, then base species must be also
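    # (issf is eBird's "identifiable subspecific form" category; taxonomy.report_as()
    # maps such entries to their base species so the tally sheet always lists the parent.)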
issfs = taxonomy.filter_issf(local_names)
for cn in issfs:
base_species = taxonomy.report_as(cn)
if base_species:
local_names.append(base_species)
entries = []
for local_name in local_names:
# common_name, taxon_order, species_group, NACC_SORT_ORDER
record = taxonomy.find_local_name_row(local_name)
if record is not None:
# e.g. ('White-throated Sparrow', 31943, 'New World Sparrows', 1848.0)
entry = (record.comName, record.TAXON_ORDER, record.SPECIES_GROUP,
record.NACC_SORT_ORDER, record.ABA_SORT_ORDER, '', 0) # append 'Rare', 'Total'
entries.append(entry)
df = pd.DataFrame(entries, columns=['CommonName', 'TaxonOrder', 'Group',
'NACC_SORT_ORDER', 'ABA_SORT_ORDER', 'Rare', 'Total'])
# Re-order
cols = ['Group', 'CommonName', 'Rare', 'Total', 'TaxonOrder',
'NACC_SORT_ORDER', 'ABA_SORT_ORDER']
local_checklist = df[cols]
local_checklist.sort_values(by='TaxonOrder', inplace=True)
# local_checklist.shape
# double_translated may have duplicates
local_checklist = local_checklist[
~local_checklist.duplicated(subset=['CommonName'], keep='first')]
local_checklist = process_annotations_or_rarities(local_checklist, fpath, circle_prefix)
# Re-order columns
preferred_order = ['Group', 'CommonName', 'Rare', 'D', 'Total', 'Ad', 'Im',
'TaxonOrder', 'NACC_SORT_ORDER', 'ABA_SORT_ORDER', 'Difficulty',
'Adult', 'Immature', 'W-morph', 'B-Morph', 'CountSpecial']
newcols = [col for col in preferred_order if col in local_checklist.columns]
local_checklist = local_checklist[newcols]
# Write out full tally sheet
# circle_code = circle_prefix[0:4]
# double_path = outputs_path / f'{circle_code}-DoubleX.xlsx'
# write_local_checklist_with_group(local_checklist, double_path, parameters.parameters)
return local_checklist
def strip_off_scientific_names(text_list: List[str], taxonomy: Taxonomy) -> List[str]:
# The CAMP-2020 checklist has <Common Name> <Scientific Name>
# Assume all scientific names are two words and drop
stripped_text_list = []
for line in text_list:
line = line.strip()
# e.g. line = 'California Quail Callipepla californica'
words = line.split(' ')
if len(words) > 2:
sci_name = ' '.join(words[-2:]).lower()
row = taxonomy.find_scientific_name_row(sci_name)
if row is not None:
line = ' '.join(words[:-2]) #.lower()
stripped_text_list.append(line)
return stripped_text_list
def process_checklist(checklist_path: Path,
output_dir: Path,
taxonomy: Taxonomy,
local_translation_context: LocalTranslationContext,
parameters: Parameters,
circle_prefix: str
):
"""
- Extract text
"""
# Use circle_abbrev as a prefix to distinguish output for multiple checklists
# Extract text from file and do basic text preprocessing
text_extractor = TextExtractorFactory().create(checklist_path)
text = text_extractor.extract()
debug_write_raw_text(text, checklist_path, debug_path)
text_list = sorted(list(set(text.split('\n'))))
# skip tertiary_transformation() for now
text_list = [secondary_species_processing(pre_process_line(line)) for line in text_list]
# text_list = [tertiary_transformation(secondary_species_processing(pre_process_line(line))) \
# for line in text_list]
text_list = strip_off_scientific_names(text_list, taxonomy)
# print(text_list)
text_list = sorted(list(set(text_list)))
# Processing 1 checklist here
tti = TaxonomyTokenIdentify(taxonomy, cache_path)
# use text_list from above
text_list_lower = [x.lower() for x in text_list]
possibles = filter_to_possibles(tti, text_list_lower)
print(f'Possible species lines: {len(possibles)} (based on word intersections)')
# Double translate
# print('Doing double translation') # Can take a while
translated = []
for line in text_list_lower: # was: possibles
txline = local_translation_context.apply_translations(line.lower(), True)
translated.append(txline)
double_translated = []
for line, _ in translated:
txline2 = local_translation_context.apply_translations(line.lower(), True)
double_translated.append(txline2)
# Write Spacy visualization
write_visualization(list(set([x[0] for x in double_translated])), checklist_path, debug_path,
taxonomy,
tti)
# -------
local_checklist = build_full_tally_sheet(double_translated,
checklist_path, taxonomy,
parameters, circle_prefix)
cols_to_hide = ['Rare', 'Adult', 'Immature', 'W-morph', 'B-Morph', 'Difficulty', 'CountSpecial']
# The first checklist we write has a single column for group and is
# used as the template for the Service-ProcessEBird phase
# don't use circle_prefix here
circle_abbrev = circle_abbrev_from_path(checklist_path)
single_path = output_dir / f'{circle_abbrev}-Single.xlsx'
write_final_checklist_spreadsheet(local_checklist,
single_path,
parameters.parameters,
additional_sheets=None,
cols_to_hide=cols_to_hide,
cols_to_highlight=['Total'])
# Write out an empty annotations file if none exists
annotations_path = inputs_parse_path / f'{circle_prefix}Annotations.xlsx'
if not annotations_path.exists():
print(f'Creating empty annotations file: {annotations_path.as_posix()}')
annotations = local_checklist.copy()
for col in ['Rare', 'Adult', 'Immature', 'Easy', 'Marginal', 'Difficult']:
annotations[col] = ''
write_final_checklist_spreadsheet(annotations,
annotations_path,
parameters.parameters,
additional_sheets=None,
cols_to_hide=None,
cols_to_highlight=None)
exceptions_path = inputs_parse_path / f'{circle_prefix}Exceptions.xlsx'
if not exceptions_path.exists():
print(f'Creating empty exceptions file: {exceptions_path.as_posix()}')
empty_exceptions = pd.DataFrame(
{'CommonName': '', 'Add': '', 'Subtract': '', 'Comments': ''},
index=range(20)) # Adding rows to a table is a pain in Excel, give some room
write_basic_spreadsheet(empty_exceptions, exceptions_path,
column_widths={'CommonName': 30, 'Add': 11,
'Subtract': 11, 'Comments': 50},
columns_to_center=['Add', 'Subtract'])
double_path = output_dir / f'{circle_abbrev}-Double.xlsx'
write_local_checklist_with_group(local_checklist, double_path, parameters.parameters)
ground_truths_df = ground_truth_for_code(circle_abbrev)
if not ground_truths_df.empty:
_ = check_against_ground_truth(local_checklist, ground_truths_df)
categorized_lines = categorize_lines(circle_abbrev, text_list,
local_translation_context, tti)
write_categorized_lines_spreadsheet(categorized_lines,
debug_path / f'{circle_abbrev}-categorized_lines.xlsx',
col_widths=[40, 40, 11, 16],
col_align=['left', 'left', 'center', 'center'],
sheet_name='Categorized Lines',
)
return text_list, double_translated, local_checklist
# ------------------------------------------------------------------------------------------
def process_checklists(checklists_path: Path,
output_dir: Path,
taxonomy: Taxonomy,
local_translation_context: LocalTranslationContext,
parameters: Parameters,
circle_prefix: str # e.g. 'CACR-2020-'
):
# Return parameters useful when debugging single list
# parsable_filetypes = TextExtractorFactory().formats()
ifc = InputFilesContext(checklists_path, ['.xlsx', '.csv', '.pdf'])
checklist_paths = ifc.allowable_files(f'{circle_prefix}checklist')
print(f'Path: {checklists_path}')
# - Extract text from tally sheet (checklist)
# - Make LocalTranslationContext and TaxonomyTokenIdentify objects
# - Do a double translation (should be idempotent)
text_list = []
double_translated = []
    local_checklist = pd.DataFrame()
import time
import requests
import json
import re
import pandas as pd
from selenium import webdriver
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
# Check whether a string is valid JSON
def is_json(html):
try:
json.loads(html)
except ValueError:
return False
return True
# Fetch a page while pretending to be a Chrome browser (random User-Agent)
def get_html(url):
ua = UserAgent() # 调用UserAgent库生成ua对象
header = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/web,image/png,*/*;q=0.8',
'user-agent': ua.random,
}
try:
r = requests.get(url, timeout=30, headers=header)
r.raise_for_status()
        r.encoding = r.apparent_encoding  # set the response encoding explicitly
return r.text
except:
return "please inspect your url or setup"
URL_list = []
driver = webdriver.Chrome()
def connect_url(location):
time.sleep(3)
url = 'https://las.cnas.org.cn/LAS/publish/externalQueryL1.jsp'
driver.get(url)
orgAddress = driver.find_elements_by_id('orgAddress')
orgAddress[0].send_keys(location)
btn = driver.find_element_by_class_name('btn')
btn.click()
time.sleep(5)
accept = False
while not accept:
try:
pirlbutton1 = driver.find_element_by_xpath('//*[@id="pirlbutton1"]')
print('Login')
pirlbutton1.click()
time.sleep(3)
flag_str = driver.find_element_by_id('pirlAuthInterceptDiv_c').is_displayed()
print(flag_str)
if not flag_str:
break
        except Exception:  # element not present yet (or stale); keep retrying
            accept = False
time.sleep(3)
flag_str = driver.find_element_by_xpath('/html/body/div[7]/div[1]/div[1]').is_displayed()
if not flag_str:
maxpage = int(driver.find_element_by_id('yui-pg0-0-totalPages-span').text)
for page in range(maxpage):
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
tbody_all = soup.find_all('tbody')
for url in tbody_all[1].find_all('a'):
str1 = str(url)
str2 = "https://las.cnas.org.cn" + str1[48:117]
if len(str2) == 92:
URL_list.append(str2)
if page <= maxpage-2:
print(page)
page_next = driver.find_element_by_xpath('/html/body/div[6]/table/tbody/tr/td[4]/a/img')
page_next.click()
time.sleep(3)
accept = False
while not accept:
try:
pirlbutton1 = driver.find_element_by_xpath('//*[@id="pirlbutton1"]')
pirlbutton1.click()
time.sleep(3)
flag_str = driver.find_element_by_id('pirlAuthInterceptDiv_c').is_displayed()
if not flag_str:
break
                    except Exception:  # element not present yet (or stale); keep retrying
                        accept = False
time.sleep(3)
# Parse the HTML of the target detail pages
First_url_Json = []
Second_url_Json = []
Third_url_Json = []
Forth_url_Json = []
Datalist = []
def get_information_from_url(url):
text = get_html(url)
soup = BeautifulSoup(text, "html.parser") # 解析text中的HTML
first_url_Json = []
second_url_Json = []
third_url_Json = []
forth_url_Json = []
for a_data in soup.find_all('a'):
a_data = str(a_data)
onclick_list = re.findall(".*onclick=(.*)'/LAS.*", a_data)
onclick_str = ''
for onclick in onclick_list:
onclick_str = onclick_str + onclick
onclick_str = onclick_str[1:-1]
type_data1 = a_data[205:207]
type_data2 = a_data[201:203]
if type_data1 == "L1" or type_data2 == "L1":
if onclick_str == "_showTop":
id_list = re.findall(".*evaluateId=(.*)&labType.*", a_data)
id_str = ''
for id in id_list:
id_str = id_str + id
first_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishKeyBranchY.action?&asstId=' + id_str
second_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishSignatoryY.action?&evaluateId=' + id_str
third_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishLCheckObjY.action?&evaluateId=' \
+ id_str + '&type=L1'
if len(first_url_Json) == 0:
first_url_Json.append(first_url)
if len(second_url_Json) == 0:
second_url_Json.append(second_url)
if len(third_url_Json) == 0:
third_url_Json.append(third_url)
elif onclick_str == "_showdown":
id_list = re.findall(".*baseInfoId=(.*)&labType.*", a_data)
id_str = ''
for id in id_list:
id_str = id_str + id
first_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishKeyBranch.action?&asstId=' + id_str
second_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishSignatory.action?&baseinfoId=' + id_str
third_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishLCheckObj.action?&baseinfoId=' \
+ id_str + '&type=L1'
if len(first_url_Json) == 0:
first_url_Json.append(first_url)
if len(second_url_Json) == 0:
second_url_Json.append(second_url)
if len(third_url_Json) == 0:
third_url_Json.append(third_url)
elif type_data1 == "L2" or type_data2 == "L2":
            if onclick_str == '_showTop' and ("L1" in a_data):
id_list = re.findall(".*evaluateId=(.*)&labType.*", a_data)
id_str = ''
for id in id_list:
id_str = id_str + id
first_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishKeyBranchY.action?&asstId=' + id_str
second_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishSignatoryY.action?&evaluateId=' + id_str
third_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishLCheckObjY.action?&evaluateId=' \
+ id_str + '&type=L1'
forth_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishLCailObjY.action?&evaluateId=' \
+ id_str + '&type=L2'
if len(first_url_Json) == 0:
first_url_Json.append(first_url)
if len(second_url_Json) == 0:
second_url_Json.append(second_url)
if len(third_url_Json) == 0:
third_url_Json.append(third_url)
if len(forth_url_Json) == 0:
forth_url_Json.append(forth_url)
elif onclick_str == '_showdown':
id_list = re.findall(".*baseInfoId=(.*)&labType.*", a_data)
id_str = ''
for id in id_list:
id_str = id_str + id
first_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishKeyBranch.action?&asstId=' + id_str
second_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishSignatory.action?&baseinfoId=' + id_str
third_url = 'https://las.cnas.org.cn/LAS/publish/queryPublishLCailObj.action?&baseinfoId=' \
+ id_str + '&type=L2'
if len(first_url_Json) == 0:
first_url_Json.append(first_url)
if len(second_url_Json) == 0:
second_url_Json.append(second_url)
if len(third_url_Json) == 0:
third_url_Json.append(third_url)
data = []
div_data = [div.get_text().strip().replace('\r', '').replace('\t', '').replace('\n', '').replace('\x1b', '')
for div in soup.find_all('div', class_='T1')]
    for org_name in div_data:  # iterate over the organization names
        first_url_Json.append(org_name)
        second_url_Json.append(org_name)
        third_url_Json.append(org_name)
        forth_url_Json.append(org_name)
        data.append(org_name)
    for tr_data in soup.find_all('tr'):
        for span_data in tr_data.find_all('span', class_='clabel'):
            span_data = span_data.get_text().strip().replace('\x1b', '')  # extract the values inside the table
            data.append(span_data)
    if len(data) < 13:
        data_len = 13 - len(data)
        for num in range(data_len):
            data.append('')
    elif len(data) > 13:  # drop data that does not belong to the current table
        data_len = len(data) - 13
        for num in range(data_len):
            data.pop(-1)  # remove from the end of the list
    Datalist.append(data)
    if len(data) >= 9:
        span_str = str(data[10]+' - '+data[11]).replace('\r', '').replace('\n', '').replace('\t', '')  # accreditation date range
else:
span_str = ''
first_url_Json.append(span_str)
second_url_Json.append(span_str)
third_url_Json.append(span_str)
forth_url_Json.append(span_str)
if len(first_url_Json[0]) > 100:
First_url_Json.append(first_url_Json)
if len(second_url_Json[0]) > 100:
Second_url_Json.append(second_url_Json)
if len(third_url_Json[0]) > 100:
Third_url_Json.append(third_url_Json)
if len(forth_url_Json[0]) > 100:
Forth_url_Json.append(forth_url_Json)
First_Data = []
def get_first_data(url):
text = get_html(url[0])
soup = BeautifulSoup(text, "html.parser") # 解析text中的HTML
html = soup.text
flag_str = is_json(html)
if flag_str is True:
json_data = json.loads(html)
else:
json_data = {"data": []}
infos = json_data['data']
for info in infos:
try:
if len(url) == 3:
zuzhi_name = str(url[1]).strip()
date_str = str(url[2]).strip()
else:
zuzhi_name = str(url[2]).strip()
date_str = str(url[3]).strip()
data_list = []
data_list.append(zuzhi_name)
data_list.append(date_str)
            # Address code (keyNum)
try:
keyNum = chr(info['keyNum'] + 64)
except:
keyNum = ""
if keyNum is not None:
data_list.append(keyNum)
else:
data_list.append('')
            # Address (addCn)
try:
addCn = info['addCn'].strip().replace('\r', '').replace('\n', '').replace('\t', '').replace('\x1b', '')
except:
addCn = ''
if addCn is not None:
data_list.append(addCn)
else:
data_list.append('')
            # Postal code (postCode)
try:
postCode = info['postCode'].strip().replace('\x1b', '')
except:
postCode = ""
if postCode is not None:
data_list.append(postCode)
else:
data_list.append('')
            # Facility features (labFeatureJson)
try:
labFeatureJson = str(info['labFeatureJson'].replace('\x1b', ''))
except:
labFeatureJson = ""
labFeatureJson = json.loads(labFeatureJson)
feature_str = ""
for feature_json in labFeatureJson:
feature_str = feature_str + feature_json['feature'] + ','
feature_str = feature_str[:-1]
data_list.append(feature_str)
            # Main activities (mainactivity)
try:
mainactivity = info['mainactivity'].strip().replace('\x1b', '')
except:
mainactivity = ""
if mainactivity is not None:
data_list.append(mainactivity)
else:
data_list.append('')
            # Remarks (remark)
try:
remark = info['remark'].strip().replace('\x1b','')
except:
remark = ""
if remark is None:
data_list.append('')
else:
data_list.append(remark)
            # Recommended flag (primaryRecommend)
try:
primaryRecommend = info['primaryRecommend'].replace('\x1b', '')
except:
primaryRecommend = ""
if primaryRecommend == '1':
data_list.append('是')
else:
data_list.append('')
            # Status (bstatus)
try:
isModify = info['isModify'].replace('\x1b', '')
except:
isModify = ""
if isModify == '1':
data_list.append("新增")
else:
data_list.append("有效")
First_Data.append(data_list)
except:
            sss = 'Failed to scrape this record'
print(sss)
pass
Second_Data = []
def get_second_data(url):
text = get_html(url[0])
soup = BeautifulSoup(text, "html.parser") # 解析text中的HTML
html = soup.text
flag_str = is_json(html)
if flag_str is True:
json_data = json.loads(html)
else:
json_data = {"data": []}
infos = json_data['data']
for info in infos:
try:
if len(url) == 3:
zuzhi_name = str(url[1]).strip()
date_str = str(url[2]).strip()
else:
zuzhi_name = str(url[2]).strip()
date_str = str(url[3]).strip()
data_list = []
data_list.append(zuzhi_name)
data_list.append(date_str)
            # Sequence number (num)
try:
num = info['num']
except:
num = ""
if num is not None:
data_list.append(num)
else:
data_list.append('')
            # Name (nameCh)
try:
nameCh = info['nameCh'].strip().replace('\x1b', '')
except:
nameCh = ''
if nameCh is not None:
data_list.append(nameCh)
else:
data_list.append('')
            # Authorized signature fields (authorizedFieldCh)
try:
authorizedFieldCh = info['authorizedFieldCh'].strip().replace('\x1b', '')
except:
authorizedFieldCh = ""
            if authorizedFieldCh is not None:
data_list.append(authorizedFieldCh)
else:
data_list.append('')
            # Notes (note)
            try:
                note = info['note'].strip().replace('\r', '').replace('\n', '').replace('\t', '').replace('\x1b', '')
            except:
                note = ''
            if note is None:
                data_list.append('')
            else:
                data_list.append(note)
            # Recommended flag (recommend)
try:
recommend = info['recommend'].replace('\x1b', '')
except:
recommend = ""
if recommend == '1':
data_list.append('是')
else:
data_list.append('')
            # Status (bstatus)
try:
isModify = info['isModify'].replace('\x1b', '')
except:
isModify = ""
if isModify == '1':
data_list.append("新增")
else:
data_list.append("有效")
Second_Data.append(data_list)
except:
            sss = 'Failed to scrape this record'
print(sss)
pass
Third_Data = []
def get_third_data(url):
text = get_html(url[0])
soup = BeautifulSoup(text, "html.parser") # 解析text中的HTML
html = soup.text
flag_str = is_json(html)
if flag_str is True:
json_data = json.loads(html)
else:
json_data = {"data": []}
infos = json_data['data']
for info in infos:
try:
if len(url) == 3:
zuzhi_name = str(url[1]).strip()
date_str = str(url[2]).strip()
else:
zuzhi_name = str(url[2]).strip()
date_str = str(url[3]).strip()
data_list = []
data_list.append(zuzhi_name)
data_list.append(date_str)
            # Group name (typeName)
try:
typeName = info['typeName']
except:
typeName = ""
if typeName is None:
typeName = '未分组'
            typeName = typeName.strip()
data_list.append(typeName)
            # Test object sequence number (num)
try:
num = info['num']
except:
num = ""
if num is not None:
data_list.append(str(num))
else:
data_list.append('')
            # Test object (objCh)
try:
objCh = info['objCh'].strip().replace('\r', '').replace('\n',
'').replace('\t', '').replace('\x1b', '')
except:
objCh = ''
if objCh is not None:
data_list.append(objCh)
else:
data_list.append('')
            # Test item sequence number (paramNum)
try:
paramNum = info['paramNum']
except:
paramNum = " "
if paramNum is not None:
data_list.append(str(paramNum))
else:
data_list.append('')
            # Test item name (paramCh)
try:
paramCh = info['paramCh'].strip().replace('\r', '').replace('\n',
'').replace('\t', '').replace('\x1b', '')
except:
paramCh = ""
if paramCh is not None:
data_list.append(paramCh)
else:
data_list.append('')
            # Testing standard relied upon (stdAllDesc)
try:
stdAllDesc = info['stdAllDesc'].strip().replace('\x1b', '')
except:
stdAllDesc = ""
if stdAllDesc is not None:
data_list.append(stdAllDesc)
else:
data_list.append('')
            # Notes (limitCh)
            try:
                limitCh = info['limitCh'].strip().replace('\r', '').replace('\n', '').replace('\t', '').replace('\x1b', '')
            except:
                limitCh = ''
            if limitCh is None:
                data_list.append('')
            else:
                data_list.append(limitCh)
            # Status (stdStatus)
try:
stdStatus = info['stdStatus'].strip().replace('\x1b', '')
except:
stdStatus = ''
if stdStatus == '0':
stdStatus = "有效"
else:
stdStatus = "新增"
data_list.append(stdStatus)
Third_Data.append(data_list)
except:
            sss = 'Failed to scrape this record'
print(sss)
pass
Forth_Data = []
def get_forth_data(url):
text = get_html(url[0])
soup = BeautifulSoup(text, "html.parser") # 解析text中的HTML
html = soup.text
flag_str = is_json(html)
if flag_str is True:
json_data = json.loads(html)
else:
json_data = {"data": []}
infos = json_data['data']
for info in infos:
try:
if len(url) == 3:
zuzhi_name = str(url[1]).strip()
date_str = str(url[2]).strip()
else:
zuzhi_name = str(url[2]).strip()
date_str = str(url[3]).strip()
data_list = []
data_list.append(zuzhi_name)
data_list.append(date_str)
            # Group name (typeName)
try:
typeName = info['typeName']
except:
typeName = ""
if typeName is None:
typeName = '未分组'
            typeName = typeName.strip()
data_list.append(typeName)
            # Sequence number (objNum)
try:
objNum = info['objNum']
except:
objNum = ""
if objNum is not None:
data_list.append(str(objNum))
else:
data_list.append('')
            # Measuring instrument name (objCh)
try:
objCh = info['objCh'].strip().replace('\r', '').replace('\n',
'').replace('\t', '').replace('\x1b', '')
except:
objCh = ''
if objCh is not None:
data_list.append(objCh)
else:
data_list.append('')
            # Measured item name (paramCh)
try:
paramCh = info['paramCh'].strip().replace('\x1b','')
except:
paramCh = ""
if paramCh is not None:
data_list.append(paramCh)
else:
data_list.append('')
            # Calibration range (standardCodeStr)
try:
standardCodeStr = info['standardCodeStr'].strip().replace('\x1b', '')
except:
standardCodeStr = ""
if standardCodeStr is not None:
data_list.append(standardCodeStr)
else:
data_list.append('')
            # Measurement range (testCh)
try:
testCh = info['testCh'].strip().replace('\x1b', '')
except:
testCh = ''
if testCh is not None:
data_list.append(testCh)
else:
data_list.append('')
            # Expanded uncertainty (kvalueTag, kvalueCh)
try:
kvalueTag = info['kvalueTag'].strip().replace('\x1b', '')
kvalueCh = info['kvalueCh'].strip().replace('\x1b', '')
except:
kvalueTag = ''
kvalueCh = ''
if kvalueTag is not None or kvalueCh is not None:
data_list.append(kvalueTag + "=" + kvalueCh)
else:
data_list.append('')
            # Notes (limitCh)
            try:
                limitCh = info['limitCh'].strip().replace('\r', '').replace('\n', '').replace('\t', '').replace('\x1b', '')
            except:
                limitCh = ''
            if limitCh is None:
                data_list.append('')
            else:
                data_list.append(limitCh)
            # Status (status)
try:
status = info['status'].strip().replace('\x1b','')
except:
status = ''
if status == '0':
status = "有效"
else:
status = "新增"
data_list.append(status)
Forth_Data.append(data_list)
except:
            sss = 'Failed to scrape this record'
print(sss)
pass
if __name__ == '__main__':
df = pd.read_excel('E:\\Pycharm_xjf\\JiGou_PaChong\\Biaozhun_Data\\机构地址.xlsx', sheet_name='Sheet1')
    data = df['机构地址']  # column: organization address
i = 1
for key in data:
        connect_url(str(key))
i = i + 1
driver.close()
driver.quit()
    df_Sheet1 = pd.DataFrame(URL_list, columns=['url'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mock
import pytest
import collections
import json
import pandas as pd
from pandas.util.testing import assert_frame_equal
from nbformat.v4 import new_notebook, new_code_cell, new_markdown_cell, new_output
from . import get_notebook_path, get_notebook_dir
from .. import read_notebook, utils
from ..models import Notebook
from ..exceptions import ScrapbookException
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
@pytest.fixture(scope='session', autouse=True)
def kernel_mock():
"""Mocks the kernel to capture warnings during testing"""
with mock.patch.object(utils, 'is_kernel') as _fixture:
yield _fixture
class AnyDict(object):
def __eq__(self, other):
return isinstance(other, dict)
@pytest.fixture
def notebook_result():
path = get_notebook_path("collection/result1.ipynb")
return read_notebook(path)
@pytest.fixture
def notebook_backwards_result():
path = get_notebook_path("record.ipynb")
return read_notebook(path)
def test_bad_path():
with pytest.raises(FileNotFoundError):
Notebook("not/a/valid/path.ipynb")
def test_bad_ext():
with pytest.raises(Warning):
Notebook("not/a/valid/extension.py")
@mock.patch("papermill.iorw.papermill_io.read")
def test_good_ext_for_url(mock_read):
sample_output = {
"cells": [{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": []
}]
}
mock_read.return_value = json.dumps(sample_output)
params = "?sig=some-unique-secret-token"
url = "abs://mystorage.blob.core.windows.net/my-actual-notebook.ipynb" + params
Notebook(url)
mock_read.assert_called_once()
def test_bad_ext_for_url():
with pytest.raises(Warning):
params = "?sig=some-unique-secret-token"
url = "abs://mystorage.blob.core.windows.net/my-actual-notebook.txt" + params
Notebook(url)
def test_filename(notebook_result):
assert notebook_result.filename == "result1.ipynb"
def test_directory(notebook_result):
assert notebook_result.directory == get_notebook_dir("collection/result1.ipynb")
def test_parameters(notebook_result):
assert notebook_result.parameters == dict(foo=1, bar="hello")
def test_data_scraps(notebook_result):
assert notebook_result.scraps.data_dict == {
"dict": {u"a": 1, u"b": 2},
"list": [1, 2, 3],
"number": 1,
"one": 1,
}
def test_display_scraps(notebook_result):
assert notebook_result.scraps.display_dict == {
"output": {
"data": {"text/plain": "'Hello World!'"},
"metadata": {
"scrapbook": {
"name": "output",
"data": False,
"display": True,
}
},
"output_type": "display_data",
},
"one_only": {
"data": {"text/plain": "'Just here!'"},
"metadata": {
"scrapbook": {"name": "one_only", "data": False, "display": True}
},
"output_type": "display_data",
},
}
def test_scraps_collection_dataframe(notebook_result):
expected_df = pd.DataFrame(
[
("one", 1, "json", None),
("number", 1, "json", None),
("list", [1, 2, 3], "json", None),
("dict", {u"a": 1, u"b": 2}, "json", None),
("output", None, "display", AnyDict()),
("one_only", None, "display", AnyDict()),
],
columns=["name", "data", "encoder", "display"],
)
    assert_frame_equal(notebook_result.scraps.dataframe, expected_df, check_exact=True)
from sklearn.model_selection import ShuffleSplit, learning_curve
import numpy as np
import pandas as pd
import seaborn as sns
def plot_learning_curves(
model, X, y,
n_repeats=1,
train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, title=None,
**kwargs
):
if cv is None and n_repeats > 1:
cv = ShuffleSplit(n_splits=n_repeats)
train_sizes, train_scores, test_scores = learning_curve(
model, X, y, train_sizes=train_sizes, cv=cv, scoring=scoring
)
    lc_df = pd.DataFrame(columns=['size', 'score', 'traintest'])
import duckdb
import pandas as pd
import numpy
import pytest
def check_category_equal(category):
df_in = pd.DataFrame({
'x': pd.Categorical(category, ordered=True),
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
assert df_in.equals(df_out)
def check_create_table(category):
conn = duckdb.connect()
conn.execute ("PRAGMA enable_verification")
df_in = pd.DataFrame({
'x': pd.Categorical(category, ordered=True),
'y': pd.Categorical(category, ordered=True)
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
assert df_in.equals(df_out)
conn.execute("CREATE TABLE t1 AS SELECT * FROM df_in")
conn.execute("CREATE TABLE t2 AS SELECT * FROM df_in")
    # Do an insert to trigger the string -> categorical cast
conn.execute("INSERT INTO t1 VALUES ('2','2')")
res = conn.execute("SELECT x FROM t1 where x = '1'").fetchall()
assert res == [('1',)]
res = conn.execute("SELECT t1.x FROM t1 inner join t2 on (t1.x = t2.x)").fetchall()
assert res == conn.execute("SELECT x FROM t1").fetchall()
# Can't compare different ENUMs
with pytest.raises(Exception):
conn.execute("SELECT * FROM t1 inner join t2 on (t1.x = t2.y)").fetchall()
assert res == conn.execute("SELECT x FROM t1").fetchall()
# Triggering the cast with ENUM as a src
conn.execute("ALTER TABLE t1 ALTER x SET DATA TYPE VARCHAR")
class TestCategory(object):
def test_category_simple(self, duckdb_cursor):
df_in = pd.DataFrame({
'float': [1.0, 2.0, 1.0],
'int': pd.Series([1, 2, 1], dtype="category")
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
print (duckdb.query_df(df_in, "data", "SELECT * FROM data").fetchall())
print (df_out['int'])
assert numpy.all(df_out['float'] == numpy.array([1.0, 2.0, 1.0]))
assert numpy.all(df_out['int'] == numpy.array([1, 2, 1]))
def test_category_nulls(self, duckdb_cursor):
df_in = pd.DataFrame({
'int': pd.Series([1, 2, None], dtype="category")
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
print (duckdb.query_df(df_in, "data", "SELECT * FROM data").fetchall())
assert df_out['int'][0] == 1
assert df_out['int'][1] == 2
assert numpy.isnan(df_out['int'][2])
def test_category_string(self, duckdb_cursor):
check_category_equal(['foo','bla','zoo', 'foo', 'foo', 'bla'])
def test_category_string_null(self, duckdb_cursor):
check_category_equal(['foo','bla',None,'zoo', 'foo', 'foo',None, 'bla'])
def test_categorical_fetchall(self, duckdb_cursor):
df_in = pd.DataFrame({
'x': pd.Categorical(['foo','bla',None,'zoo', 'foo', 'foo',None, 'bla'], ordered=True),
})
assert duckdb.query_df(df_in, "data", "SELECT * FROM data").fetchall() == [('foo',), ('bla',), (None,), ('zoo',), ('foo',), ('foo',), (None,), ('bla',)]
def test_category_string_uint8(self, duckdb_cursor):
category = []
for i in range (10):
category.append(str(i))
check_create_table(category)
def test_category_fetch_df_chunk(self, duckdb_cursor):
con = duckdb.connect()
categories = ['foo','bla',None,'zoo', 'foo', 'foo',None, 'bla']
result = categories*128
categories = result * 2
df_result = pd.DataFrame({
'x': pd.Categorical(result, ordered=True),
})
df_in = pd.DataFrame({
'x': pd.Categorical(categories, ordered=True),
})
con.register("data", df_in)
query = con.execute("SELECT * FROM data")
cur_chunk = query.fetch_df_chunk()
assert(cur_chunk.equals(df_result))
cur_chunk = query.fetch_df_chunk()
assert(cur_chunk.equals(df_result))
cur_chunk = query.fetch_df_chunk()
assert(cur_chunk.empty)
def test_category_mix(self, duckdb_cursor):
df_in = pd.DataFrame({
'float': [1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 0.0],
            'x': pd.Categorical(['foo','bla',None,'zoo', 'foo', 'foo',None, 'bla'], ordered=True),
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
import datetime
from .dataset import Dataset
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, PublicationEmbargoWarning, ReindexMapError, InvalidParameterError
class UcecConf(Dataset):
def __init__(self, version="latest", no_internet=False):
"""Load all of the dataframes as values in the self._data dict variable, with names as keys, and format them properly.
Parameters:
        version (str, optional): The version number to load, or the string "latest" to just load the latest build. Default is "latest".
no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False.
"""
# Set some needed variables, and pass them to the parent Dataset class __init__ function
# This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
valid_versions = ["1.0", "1.1", "1.2", "2.0", "2.0.1"]
data_files = {
"1.0": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_Direct_SRM_tumor_v1.0.cct.gz", #SRM not to be included in 1.0
#"UCEC_confirmatory_IMAC_SRM_tumor_v1.0.cct.gz",
"UCEC_confirmatory_meta_table_v1.0.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_nglycoform-site_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
#"UCEC_confirmatory_RNAseq_isoform_FPKM_removed_circRNA_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_WES_cnv_gistic_thresholded_tumor_v1.0.cct.gz",
"UCEC_confirmatory_WES_cnv_log2_ratio_tumor_v1.0.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.0.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.0.maf.gz",
#"UCEC_confirmatory_WGS_SV_tumor_v1.0.txt.gz" #structural_variation - not to be included in 1.0
],
"1.1": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_meta_table_v1.1.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.1.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.1.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_cnv_gistic_thresholded_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_cnv_log2_ratio_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.1.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.1.maf.gz",
],
"1.2": [
"UCEC_confirmatory_meta_table_v1.2.xlsx",
"UCEC_confirmatory_SRM_Direct_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.2.txt.gz",
# "UCEC_confirmatory_RNAseq_isoform_FPKM_removed_circRNA_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v1.2.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v1.2.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.2.maf.gz",
# "UCEC_confirmatory_WGS_SV_tumor_v1.2.txt.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.2.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.2.cct.gz",
# "UCEC_confirmatory_nglycoform-site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
],
"2.0": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_meta_table_v2.0.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v2.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v2.0.cct.gz",
# "UCEC_confirmatory_WES_somatic_mutation_category_level_V1.2.txt.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v2.0.maf.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v2.0.cct.gz",
# "UCEC_confirmatory_WGS_SV_tumor_v2.0.txt.gz",
],
"2.0.1": [
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_meta_table_v2.0.1.xlsx",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v2.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v2.0.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_v2.0.maf.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v2.0.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v2.0.cct.gz",
],
}
# Call the parent class __init__ function
super().__init__(cancer_type="ucecconf", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet)
# Load the data into dataframes in the self._data dict
loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file. We'll use this to identify files for parsing in the if/elif statements below
if file_name in ["UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["acetylproteomics_gene"] = df
elif file_name in ["UCEC_confirmatory_acetyl_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_acetyl_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.reset_index()
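                # 'idx' appears to encode name@database_id@name-site, so split on '@' and
                # keep the trailing '-<site>' piece to build the (Name, Site, Database_ID) index.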
df[['Name','Database_ID','Site']] = df.idx.str.split("@", expand=True)
df['Site'] = df['Site'].str.rsplit('-',1,expand=True)[1]
df = df.set_index(["Name", "Site", "Database_ID"])
df = df.drop(columns=["idx"])
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["acetylproteomics"] = df
elif file_name in ["UCEC_confirmatory_meta_table_v1.0.xlsx",
"UCEC_confirmatory_meta_table_v1.1.xlsx",
"UCEC_confirmatory_meta_table_v1.2.xlsx",
"UCEC_confirmatory_meta_table_v2.0.xlsx",
"UCEC_confirmatory_meta_table_v2.0.1.xlsx"]:
df = pd.read_excel(file_path)
df.insert(6, "Proteomics_Tumor_Normal", df["Group"])
df.loc[df['Group'] == 'Enriched_Normal', 'Idx'] = df['Idx'] + '.N'
df.loc[df['Group'] == 'Adjacent_normal', 'Idx'] = df['Idx'].str[:-2] + '.N'
df = df.set_index("Idx")
df.loc[df['Group'] != 'Tumor', 'Group'] = 'Normal'
df = df.rename({'Group': 'Sample_Tumor_Normal'}, axis=1)
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["clinical"] = df
elif file_name in ["UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.0.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.1.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v1.2.cct.gz",
"UCEC_confirmatory_methylation_gene_level_beta_value_tumor_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0, na_values=' NA')
df = df.transpose()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["methylation"] = df
elif file_name in ["UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_miRNAseq_miRNA_TPM_log2(x+1)_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["miRNA"] = df
elif file_name in ["UCEC_confirmatory_phospho_gene_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_gene_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["phosphoproteomics_gene"] = df
elif file_name in ["UCEC_confirmatory_phospho_site_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_phospho_site_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.reset_index()
df[['Name','Database_ID','Site']] = df.idx.str.split("@", expand=True)
df['Site'] = df['Site'].str.rsplit('-',1,expand=True)[1]
df = df.set_index(["Name", "Site", "Database_ID"])
df = df.drop(columns=["idx"])
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["phosphoproteomics"] = df
elif file_name in ["UCEC_confirmatory_proteomics_ratio_median_polishing_log22_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_proteomics_ratio_median_polishing_log2_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["proteomics"] = df
elif file_name in ["UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_circRNA_RSEM_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["circular_RNA"] = df
elif file_name in ["UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.0.txt.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.1.txt.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v1.2.txt.gz",
"UCEC_confirmatory_RNAseq_gene_fusion_tumor_v2.0.txt.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.reset_index()
df = df.set_index("Sample")
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["gene_fusion"] = df
elif file_name in ["UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.0.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.1.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v1.2.cct.gz",
"UCEC_confirmatory_RNAseq_gene_RSEM_removed_circRNA_UQ_log2(x+1)_tumor_normal_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["transcriptomics"] = df
# Targeted proteomics is the direct and PRISM SRM data
elif file_name in ["UCEC_confirmatory_SRM_Direct_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_Direct_tumor_v2.0.cct.gz",]:
df_direct = pd.read_csv(file_path, sep='\t')
df_direct[['Name','Peptide']] = df_direct['idx'].str.rsplit("-", 1, expand=True)
df_direct = df_direct.set_index(["Name", "Peptide"])
df_direct = df_direct.drop(columns=["idx"])
df_direct = df_direct.transpose()
df_direct = df_direct.sort_index()
df_direct.index.name = "Patient_ID"
# Merge if we have both
if "targeted_proteomics" in self._data:
df_prism = self._data["targeted_proteomics"]
df_combined = pd.concat([df_direct, df_prism])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["targeted_proteomics"] = df_combined
else:
self._data["targeted_proteomics"] = df_direct
elif file_name in ["UCEC_confirmatory_SRM_PRISM_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_PRISM_tumor_v2.0.cct.gz",]:
df_prism = pd.read_csv(file_path, sep='\t')
df_prism[['Name','Peptide']] = df_prism['idx'].str.rsplit("-", 1, expand=True)
df_prism = df_prism.set_index(["Name", "Peptide"])
df_prism = df_prism.drop(columns=["idx"])
df_prism = df_prism.transpose()
df_prism = df_prism.sort_index()
df_prism.index.name = "Patient_ID"
# Merge if we have both
if "targeted_proteomics" in self._data:
df_direct = self._data["targeted_proteomics"]
df_combined = pd.concat([df_direct, df_prism])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["targeted_proteomics"] = df_combined
else:
self._data["targeted_proteomics"] = df_prism
elif file_name in ["UCEC_confirmatory_SRM_IMAC_tumor_v1.1.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v1.2.cct.gz",
"UCEC_confirmatory_SRM_IMAC_tumor_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t')
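# Set the first row's peptide identifier explicitly, label all rows with the RB1 gene symbol, and index by (Name, Peptide)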
df.at[0,'idx'] = "FPSS[+80]PLRIPGGNIY[+80]ISPLK"
df['Name'] = "RB1"
df = df.rename(columns={"idx":"Peptide"})
df = df.set_index(["Name", "Peptide"])
df = df.transpose()
df = df.sort_index()
df.columns.name = "Name"
df.index.name = "Patient_ID"
self._data["targeted_phosphoproteomics"] = df
elif file_name in ["UCEC_confirmatory_WES_cnv_gistic_thresholded_tumor_v1.0.cct.gz",
"UCEC_confirmatory_WES_cnv_gistic_thresholded_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v1.2.cct.gz",
"UCEC_confirmatory_WGS_cnv_gistic_thresholded_tumor_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t')
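# Row IDs are formatted "Gene|Chromosome"; keep only the gene symbol as the index and drop the helper columns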
df[['Name','Chromosome']] = df.idx.str.split("|", expand=True)
df = df.set_index(["Name"])
df = df.drop(columns=["idx", "Chromosome"])
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["CNV_gistic"] = df
elif file_name in ["UCEC_confirmatory_WES_cnv_log2_ratio_tumor_v1.0.cct.gz",
"UCEC_confirmatory_WES_cnv_log2_ratio_tumor_v1.1.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v1.2.cct.gz",
"UCEC_confirmatory_WGS_cnv_log2_ratio_tumor_v2.0.cct.gz",]:
df = pd.read_csv(file_path, sep='\t')
df[['Name','Chromosome']] = df.idx.str.split("|", expand=True)
df = df.set_index(["Name"])
df = df.drop(columns=["idx", "Chromosome"])
df = df.transpose()
df = df.sort_index()
df.index.name = "Patient_ID"
self._data["CNV_log2ratio"] = df
elif file_name in ["UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.0.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.1.cbt.gz",
"UCEC_confirmatory_WES_somatic_mutation_gene_level_V1.2.cbt.gz"]:
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.transpose()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["somatic_mutation_binary"] = df
elif file_name in ["UCEC_confirmatory_WES_somatic_mutation_v1.0.maf.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.1.maf.gz",
"UCEC_confirmatory_WES_somatic_mutation_v1.2.maf.gz",
"UCEC_confirmatory_WES_somatic_mutation_v2.0.maf.gz",]:
df = pd.read_csv(file_path, sep='\t', dtype={88:object})
# Copyright 2019-2020 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle
import logging
from pathlib import Path
import numpy as np
import pandas as pd
from asreview import __version__ as asreview_version
from asreview.config import LABEL_NA
from asreview.data import ASReviewData
from asreview.webapp.utils.paths import get_data_file_path
from asreview.webapp.utils.paths import get_labeled_path
from asreview.webapp.utils.paths import get_pool_path
from asreview.webapp.utils.paths import get_proba_path
from asreview.webapp.utils.paths import get_project_path
class CacheDataError(Exception):
pass
def _get_cache_data_path(project_id):
fp_data = get_data_file_path(project_id)
return get_data_file_path(project_id) \
.with_suffix(fp_data.suffix + ".pickle")
def _read_data_from_cache(project_id, version_check=True):
fp_data_pickle = _get_cache_data_path(project_id)
try:
# get the pickle data
with open(fp_data_pickle, 'rb') as f_pickle_read:
data_obj, data_obj_version = pickle.load(f_pickle_read)
# validate data object
if not isinstance(data_obj.df, pd.DataFrame):
raise ValueError()
# drop cache files generated by older versions
if (not version_check) or (asreview_version == data_obj_version):
return data_obj
except FileNotFoundError:
# file not available
pass
except Exception as err:
# problem loading pickle file or outdated
# remove the pickle file
logging.error(f"Error reading cache file: {err}")
try:
os.remove(fp_data_pickle)
except FileNotFoundError:
pass
raise CacheDataError()
def _write_data_to_cache(project_id, data_obj):
fp_data_pickle = _get_cache_data_path(project_id)
logging.info("Store a copy of the data in a pickle file.")
with open(fp_data_pickle, 'wb') as f_pickle:
pickle.dump((data_obj, asreview_version), f_pickle)
def read_data(project_id, use_cache=True, save_cache=True):
"""Get ASReviewData object from file.
Parameters
----------
project_id: str, iterable
The project identifier.
use_cache: bool
Use the pickle file if available.
save_cache: bool
Save the file to a pickle file if not available.
Returns
-------
ASReviewData:
The data object for internal use in ASReview.
"""
# use cache file
if use_cache:
try:
return _read_data_from_cache(project_id)
except CacheDataError:
pass
# load from file
fp_data = get_data_file_path(project_id)
data_obj = ASReviewData.from_file(fp_data)
# save a pickle version
if save_cache:
_write_data_to_cache(project_id, data_obj)
return data_obj
def read_pool(project_id):
pool_fp = get_pool_path(project_id)
try:
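# The pool file holds the remaining record indices as a JSON list; cast them back to int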
with open(pool_fp, "r") as f:
pool = json.load(f)
pool = [int(x) for x in pool]
except FileNotFoundError:
pool = None
return pool
def write_pool(project_id, pool):
pool_fp = get_pool_path(project_id)
with open(pool_fp, "w") as f:
json.dump(pool, f)
def read_proba_legacy(project_id):
"""Read a project <0.15 proba values"""
# get the old json project file path
proba_fp = Path(get_project_path(project_id), "proba.json")
with open(proba_fp, "r") as f:
# read the JSON file and make a list of the proba's
proba = json.load(f)
proba = [float(x) for x in proba]
# make a dataframe that looks like the new structure
as_data = read_data(project_id)
proba = pd.DataFrame(
{
"proba": [float(x) for x in proba]
},
index=as_data.record_ids
)
proba.index.name = "record_id"
return proba
def read_proba(project_id):
proba_fp = get_proba_path(project_id)
try:
return pd.read_csv(proba_fp, index_col="record_id")
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import pulp
from pulp import *
from pandas.io import sql
from sqlalchemy import create_engine
#from werkzeug.utils import secure_filename
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
from collections import defaultdict
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
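# Differences between consecutive rows of price and quantity (week-over-week changes)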
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
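# Bin the sorted price and quantity series (5 bins each) to build a price-vs-demand frequency table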
x = spq['Product_Price']
num_bins = 5
# np.histogram returns the same bin edges plt.hist would, without drawing a figure
n, pint = np.histogram(x, num_bins)
y = spq['Product_Qty']
n, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
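# OLS demand model: quantity ~ (own price - competitor price) + promo flags + log-week trend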
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(Modeldata['Week'].iloc[vatr]*lst[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
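# Competitor prices are simulated as random integers between 7 and 14, one per week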
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible to due Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
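# Per-wagon utilisation: delivered quantity as a % of an assumed 205,000-unit load capacity, and width as a % of an assumed 370-unit usable width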
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
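# Haversine great-circle distance (km) between two lat/long points, scaled by the per-unit transport cost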
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
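# Objective: transport cost + fixed cost of opened factories + a large penalty (5,000,000 per unit) on unmet demand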
model += pulp.lpSum(
 [DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
 + [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
 + [5000000 * cap_slack[cust] for cust in Demand])
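# Demand constraints: shipments to each customer plus its slack must equal the customer's demand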
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
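# Capacity constraints: a factory can ship only if it is opened, and never more than its capacity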
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
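# Map every month to the first day of its calendar quarter so demand can be aggregated quarterly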
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weakly': ##weakly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
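# 70/30 split of the history: D is used for fitting, V is held out (used by the regression model for validation)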
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
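# Error-metric summary: rows ME/MAE/MAPE, one column per forecasting technique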
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
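# Fit an MA(1) model (ARIMA(0,0,1)) to each series (skipping the last five columns) and forecast up to the requested end date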
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
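# Fit an AR(1) model (ARIMA(1,0,0)) to each series, again skipping the last five columns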
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
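# Per-series linear trend regression: fit values against the time step, score on the hold-out V, then extrapolate noofterms periods ahead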
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now riun for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate= pd.DataFrame(dateofterms)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from ..._plotting._scatter._ScatterPlot_Module import ScatterPlot
def _calculate_LFC(df, condition, control):
"""
df
ResultsDict["lognorm_counts"]
type: pandas DataFrame
condition
column label of the condition (treatment) sample
control
column label of the control sample to subtract
"""
lfc = df[condition] - df[control]
return lfc
def _get_conditions(lfc_df):
return pd.Series(lfc_df.columns).str.split(" Rep", expand=True)[0]
def _calculate_multiLFC(df, conditions, controls):
LFCDict = {}
for i in range(len(conditions)):
LFCDict[conditions[i] + " LFC"] = _calculate_LFC(
df, condition=conditions[i], control=controls[i],
)
lfc_df = pd.DataFrame.from_dict(LFCDict)
return lfc_df
import pandas as pd
tmtids = pd.read_csv('tmtids.csv')
tmtvac = pd.read_csv('tmt_speedvac_group.csv')
import unittest
import maccorcyclingdata.testdata as testdata
import os
import pytest
import pandas as pd
from pandas._testing import assert_frame_equal
expath = "example_data/"
exmult = "example_data/multiple_csv/"
exfile = "testdata_errors.csv"
def test_import_maccor_data_BadIn():
expath1 = 1
with pytest.raises(TypeError):
testdata.import_maccor_data(expath1, exfile)
exfile1 = 0
with pytest.raises(TypeError):
testdata.import_maccor_data(expath, exfile1)
header1 = ''
with pytest.raises(TypeError):
testdata.import_maccor_data(expath, exfile, header1)
exfile2 = 'false_df.csv'
with pytest.raises(NotADirectoryError):
testdata.import_maccor_data(expath, exfile2)
return
def test_import_multiple_csv_BadIn():
expath1 = 1
with pytest.raises(TypeError):
testdata.import_multiple_csv_data(expath1)
expath2 = 'false_path/'
with pytest.raises(NotADirectoryError):
testdata.import_multiple_csv_data(expath2)
return
def test_clean_maccor_df_BadIn():
df = 'not dataframe'
with pytest.raises(TypeError):
testdata.clean_maccor_df(df)
df_cols = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
with pytest.raises(IndexError):
testdata.clean_maccor_df(df_cols)
return
def test_delete_cycle_steps_BadIn():
steps_to_delete = []
df = pd.DataFrame()
df1 = 'not dataframe'
with pytest.raises(TypeError):
testdata.delete_cycle_steps(df1)
steps_to_delete1 = "not list"
with pytest.raises(TypeError):
testdata.delete_cycle_steps(df, steps_to_delete1)
decrement = "not boolean"
with pytest.raises(TypeError):
testdata.delete_cycle_steps(df, steps_to_delete, decrement)
df2 = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10])
with pytest.raises(IndexError):
testdata.delete_cycle_steps(df2, steps_to_delete)
df3 = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10, 11])
with pytest.raises(IndexError):
testdata.delete_cycle_steps(df3, steps_to_delete)
return
def test_get_index_range_BadIn():
cyc_range = []
df = pd.DataFrame()
df1 = 'not dataframe'
with pytest.raises(TypeError):
testdata.get_index_range(df1, cyc_range)
cyc_range1 = "not list"
with pytest.raises(TypeError):
testdata.get_index_range(df, cyc_range1)
cycle_steps_idx1 = "not list"
with pytest.raises(TypeError):
testdata.get_index_range(df, cyc_range, cycle_steps_idx1)
df2 = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10])
with pytest.raises(IndexError):
testdata.get_index_range(df2, cyc_range)
df3 = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10, 11])
with pytest.raises(IndexError):
testdata.get_index_range(df3, cyc_range)
return
def test_get_cycle_data_BadIn():
headings = []
cyc_range = []
df = pd.DataFrame()
df1 = 'not dataframe'
with pytest.raises(TypeError):
testdata.get_cycle_data(df1, headings, cyc_range)
cyc_range1 = "not list"
with pytest.raises(TypeError):
testdata.get_cycle_data(df, headings, cyc_range1)
cycle_steps_idx1 = "not list"
with pytest.raises(TypeError):
testdata.get_cycle_data(df, headings, cyc_range, cycle_steps_idx1)
headings1 = "not list"
with pytest.raises(TypeError):
testdata.get_cycle_data(df, headings1, cyc_range)
df2 = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10])
with pytest.raises(IndexError):
testdata.get_cycle_data(df2, headings, cyc_range)
df3 = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10, 11])
with pytest.raises(IndexError):
testdata.get_cycle_data(df3, headings, cyc_range)
return
def test_num_cycles_BadIn():
df = pd.DataFrame()
df1='not dataframe'
with pytest.raises(TypeError):
testdata.get_num_cycles(df1)
df2 = | pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10]) | pandas.DataFrame |
from copy import deepcopy
import pytest
from pandas import DataFrame
from omniqubo.converters.converter import interpret
from omniqubo.converters.ineq_to_eq import IneqToEq
from omniqubo.models.sympyopt.constraints import (
INEQ_GEQ_SENSE,
INEQ_LEQ_SENSE,
ConstraintEq,
ConstraintIneq,
)
from omniqubo.models.sympyopt.converters import convert
from omniqubo.models.sympyopt.sympyopt import SympyOpt
class TestIneqToEq:
def test_convert(self):
sympyopt = SympyOpt()
x = sympyopt.int_var(name="x", lb=0, ub=2)
y = sympyopt.int_var(lb=-2, ub=3, name="y")
sympyopt.minimize(2 * x - 3 * y + 2)
sympyopt.add_constraint(ConstraintIneq(2 * x - 3 * y, 3, INEQ_GEQ_SENSE), name="constr1")
sympyopt.add_constraint(
ConstraintIneq(2 * x ** 2 - 3 * y, 0, INEQ_LEQ_SENSE), name="constr2"
)
sympyopt.add_constraint(
ConstraintIneq(2 * x ** 2 - 3 * y ** 3, 0, INEQ_LEQ_SENSE), name="constr3"
)
conv1 = IneqToEq("constr1", False, check_slack=False)
conv2 = IneqToEq("constr2", False, check_slack=False)
conv3 = IneqToEq("constr3", False, check_slack=False)
sympyopt = convert(sympyopt, conv1)
sympyopt = convert(sympyopt, conv2)
sympyopt = convert(sympyopt, conv3)
assert sympyopt.variables["constr1___slack"].get_lb() == 0
assert sympyopt.variables["constr1___slack"].get_ub() == 7
assert sympyopt.variables["constr2___slack"].get_lb() == 0
assert sympyopt.variables["constr2___slack"].get_ub() == 9
assert sympyopt.variables["constr3___slack"].get_lb() == 0
assert sympyopt.variables["constr3___slack"].get_ub() == 81
sympyopt3 = deepcopy(sympyopt)
sympyopt3 = convert(sympyopt, IneqToEq("constr1", True, check_slack=False))
assert sympyopt3 == sympyopt
sympyopt2 = SympyOpt()
x = sympyopt2.int_var(name="x", lb=0, ub=2)
y = sympyopt2.int_var(lb=-2, ub=3, name="y")
xi1 = sympyopt2.int_var(lb=0, ub=7, name="constr1___slack")
xi2 = sympyopt2.int_var(lb=0, ub=9, name="constr2___slack")
xi3 = sympyopt2.int_var(lb=0, ub=81, name="constr3___slack")
sympyopt2.minimize(2 * x - 3 * y + 2)
sympyopt2.add_constraint(ConstraintEq(2 * x - 3 * y - xi1, 3), name="constr1")
sympyopt2.add_constraint(ConstraintEq(2 * x ** 2 - 3 * y + xi2, 0), name="constr2")
sympyopt2.add_constraint(ConstraintEq(2 * x ** 2 - 3 * y ** 3 + xi3, 0), name="constr3")
assert sympyopt2 == sympyopt
def test_zero_slack_geq(self):
# geq
sympyopt = SympyOpt()
x = sympyopt.int_var(name="x", lb=0, ub=2)
y = sympyopt.int_var(lb=-2, ub=3, name="y")
sympyopt.add_constraint(ConstraintIneq(2 * x - 3 * y, 10, INEQ_GEQ_SENSE), name="c1")
conv = IneqToEq(".*", True, check_slack=False)
sympyopt = convert(sympyopt, conv)
sympyopt2 = SympyOpt()
x = sympyopt2.int_var(name="x", lb=0, ub=2)
y = sympyopt2.int_var(lb=-2, ub=3, name="y")
sympyopt2.add_constraint(ConstraintEq(2 * x - 3 * y, 10), name="c1")
assert sympyopt2 == sympyopt
samples1 = DataFrame({"x": [2], "y": [-2], "feasible": [True]})
samples2 = DataFrame({"x": [1], "y": [0], "feasible": [True]})
assert interpret(samples1, conv)["feasible"][0]
assert not interpret(samples2, conv)["feasible"][0]
# leq
sympyopt = SympyOpt()
x = sympyopt.int_var(name="x", lb=0, ub=2)
y = sympyopt.int_var(lb=-2, ub=3, name="y")
sympyopt.add_constraint(ConstraintIneq(2 * x + y, -2, INEQ_LEQ_SENSE), name="c2")
conv = IneqToEq(".*", True, check_slack=False)
sympyopt = convert(sympyopt, conv)
sympyopt2 = SympyOpt()
x = sympyopt2.int_var(name="x", lb=0, ub=2)
y = sympyopt2.int_var(lb=-2, ub=3, name="y")
sympyopt2.add_constraint(ConstraintEq(2 * x + y, -2), name="c2")
assert sympyopt2 == sympyopt
samples1 = DataFrame({"x": [0], "y": [-2], "feasible": [True]})
samples2 = | DataFrame({"x": [1], "y": [3], "feasible": [True]}) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2022/1/8 4:54 PM
# @Author: zhoumengjie
# @File : BondUtils.py
import json
import logging
import random
import time
import urllib
import pandas as pd
import requests
from wxcloudrun.common import akclient
header = {'Accept': '*/*',
'Connection': 'keep-alive',
'Content-type': 'application/json;charset=utf-8',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
cookie = requests.cookies.RequestsCookieJar()
cookie.set('kbzw__user_login', '7Obd08_P1ebax9aXzaPEpdCYrqXR0dTn8OTb3crUja2aqtqr2cPSkavfqKHcnKiYppOprtXdxtPGqqyon7ClmJ2j1uDb0dWMppOkqqefmqekt7e_1KLA59vZzeDapJ6nnJeKw8La4OHs0OPJr5m-1-3R44LDwtqXwsuByIGlqdSarsuui5ai5-ff3bjVw7_i6Ziun66QqZeXn77Atb2toJnh0uTRl6nbxOLmnJik2NPj5tqYsqSlkqSVrqyrppmggcfa28rr1aaXqZilqqk.;')
cookie.set('kbz_newcookie', '1;')
cookie.set('kbzw_r_uname', 'VANDY;')
cookie.set('kbzw__Session', 'fcqdk3pa4tlatoh6c338e19ju2;')
log = logging.getLogger('log')
jisilu_host = 'https://www.jisilu.cn'
cninfo_webapi_host = 'http://webapi.cninfo.com.cn'
east_host = 'https://emweb.securities.eastmoney.com'
zsxg_host = 'https://zsxg.cn'
cninfo_host = 'http://www.cninfo.com.cn'
cninfo_static_host = 'http://static.cninfo.com.cn/'
image_host = 'https://dficimage.toutiao.com'
code_suff_cache = {}
def format_func(num):
return '{:g}'.format(float(num))
class Crawler:
def __init__(self, timeout=10):
self.__timeout = timeout
def query_list(self):
r""" 查询待发可转债列表
:return:
"""
# timestamp
now = time.time() # raw time value
timestamp = int(round(now * 1000))
param = {"___jsl": "LST___t=" + str(timestamp)}
r = requests.post(jisilu_host + "/data/cbnew/pre_list/", params=param, headers=header, cookies=cookie)
if r.status_code != 200:
log.info("查询待发可转债列表失败:status_code = " + str(r.status_code))
return None
return r.json()['rows']
def user_info(self):
r = requests.post(jisilu_host + '/webapi/account/userinfo/', headers=header, cookies=cookie)
if r.status_code != 200:
print("查询集思录用户信息失败:status_code = " + str(r.status_code))
return False
data = r.json()
return data['code'] == 200 and data['data'] is not None
def login(self):
h = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}
data = 'return_url=' + 'https://www.jisilu.cn/web/data/cb/list' + '&user_name=<PASSWORD>&password=<PASSWORD>&net_auto_login=1&agreement_chk=agree&_post_type=ajax&aes=1'
r = requests.post(jisilu_host + '/account/ajax/login_process/', data=data, headers=h, cookies=cookie)
if r.status_code != 200:
log.info("登录失败:status_code = " + str(r.status_code))
return False
# reset the cookies
cookies = r.cookies.get_dict()
for key in cookies.keys():
cookie.set(key, cookies[key])
# refer
h = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}
r = requests.get(jisilu_host + '/', cookies=cookie, headers=h)
if r.status_code != 200:
log.info("登录重定向失败:status_code = " + str(r.status_code))
return False
cookies = r.cookies.get_dict()
for key in cookies.keys():
cookie.set(key, cookies[key])
return True
def query_all_bond_list(self) -> pd.DataFrame:
r"""
Query all convertible bonds that are already listed
:return:
"""
# first check whether we are logged in; if not, log in
is_login = self.user_info()
if not is_login:
print('jisilu no login...')
is_login = self.login()
print('jisilu login result:{}'.format(is_login))
h = {
'Content-Type': 'application/json; charset=utf-8',
'Init': '1',
'Referer': 'https://www.jisilu.cn/web/data/cb/list',
'Columns': '1,70,2,3,5,6,11,12,14,15,16,29,30,32,34,35,75,44,46,47,52,53,54,56,57,58,59,60,62,63,67',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36'
}
# data = 'btype=C&listed=Y&qflag=N'
r = requests.get(jisilu_host + "/webapi/cb/list_new/", headers=h, cookies=cookie)
if r.status_code != 200:
print("查询所有可转债列表失败:status_code = " + str(r.status_code))
return None
rows = r.json()['data']
df = | pd.DataFrame(rows) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
PEP 8 -- Style Guide for Python Code
https://www.python.org/dev/peps/pep-0008/
@author: visintin
without classes and methods
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sub.algs as myAlgs
plt.close('all') # close all the figures that might still be open from previous runs
x= | pd.read_csv("data/parkinsons_updrs.csv") | pandas.read_csv |
from data_cleanup import data_clean
from data_cleanup import split_data
from csv_import import descriptive
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_fscore_support
import pandas as pd
import numpy as np
# This function creates dataframe with dummies replacing object variables
def dummied_data(df):
# Separate the numeric columns
cdata_num = df.select_dtypes(exclude=['object'])
# Separate object columns
cdata_cat = df.select_dtypes(include=['object'])
# One-hot encode the object columns only
cdata_cat_onehot = | pd.get_dummies(cdata_cat) | pandas.get_dummies |
import logging
import os
import re
import pandas as pd
import numpy as np
from pandas import DataFrame
from tqdm import tqdm
from joblib import dump, load
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from ..MyMertrics import *
from pprint import pformat
from util.file_manager import remake_dir
def feature_filter(features_list):
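# Keep only feature names shorter than two characters (single-character columns).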
res = []
for feature in features_list:
if len(feature) < 2:
res.append(feature)
return res
def merge_similar_feature(data, features):
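# Sum the listed (similar) feature columns row-wise and collapse them into a single
# column named after the first feature in the list.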
column = data[features].sum(axis=1)
df = | pd.DataFrame(data=column, columns=[features[0]]) | pandas.DataFrame |
import argparse
from pathlib import Path
import graphviz
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import tree
from..utils import STATUS_ARROW, search_runs
def main():
parser = argparse.ArgumentParser(
"qTable inspector", description="Inspect the Q-Learning results"
)
parser.add_argument('path', help='Path of qTable csv')
args, _ = parser.parse_known_args()
df = pd.read_csv(args.path)
filename = Path(args.path)
actions = [column for column in df.columns if column.find("Action") != -1]
state_features = [
column for column in df.columns if column.find("Action") == -1
]
df['best'] = df[actions].idxmax(axis=1)
df['explored'] = df[actions].apply(
lambda row: not all([val == 0. for val in row]), axis=1
)
df_dtree_X = df[df.explored][state_features].copy()
for column in state_features:
cur_column = df_dtree_X[column]
new_type = pd.to_numeric(cur_column[cur_column != "max"]).dtype
if new_type == np.float64:
df_dtree_X[column].replace(to_replace={
# 'max': np.finfo(np.float32).max
'max': float(np.iinfo(np.int32).max)
# 'max': -1.
}, inplace=True)
df_dtree_X[column] = | pd.to_numeric(df_dtree_X[column]) | pandas.to_numeric |
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import os
import argparse
def get_header_size(pir_file):
"""
Get size of header (lines)
"""
with open (pir_file, 'r') as f:
line = f.readline()
return int(line.split(' ')[1])+1
def get_header(pir_file):
"""
Get header from PIR file
"""
n = get_header_size(pir_file)
with open(pir_file) as f:
header = ''.join([next(f) for _ in range(n)])
return header, n
def check_headers(pir_files):
"""
Check whether headers have same size
"""
s = [get_header_size(f) for f in pir_files]
if len(np.unique(s))!=1:
raise ValueError('Header sizes do not match: {}'.format(s))
def concatenate_pir_files(pir_files, output_pir):
"""
Concatenate PIR files to 'output_pir'. All files must have the same header
"""
check_headers(pir_files)
header, header_lines = get_header(pir_files[0])
with open(output_pir, 'w') as out:
out.write(header) # write header once
for pir_file in pir_files:
with open(pir_file) as f:
for _ in range(header_lines): # skip header
next(f)
for line in f:
out.write(line)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Combine output from extractPIRs run on individual samples. Equivalent to running extractPIRs on multiple samples.')
parser.add_argument('pir_files_tsv',
help='TSV containing paths to PIR files, in the format: [[bam1_chr1, ..., bam1_chrN], ..., [bamM_chr1, ..., bamM_chrN]].\
\nPIR file names must be in the format <sample_id>.<chr>.pir')
parser.add_argument('chr_list', help='File listing chromosomes to process.')
parser.add_argument('prefix', help='Prefix for output files: <prefix>.<chr>.pir')
parser.add_argument('-o', '--output_dir', help='Output directory')
args = parser.parse_args()
with open(args.chr_list) as f:
chr_order = f.read().strip().split('\n')
print('Sorting PIR files by chromosome.', flush=True)
pir_files_df = pd.read_csv(args.pir_files_tsv, header=None, sep='\t')
pir_files = pir_files_df.values.tolist()
# sort by chromosome order
sorted_pir_files = []
for p in pir_files:
pir_dict = {os.path.split(i)[1].split('.')[1]:i for i in p}
sorted_pir_files.append([pir_dict[i] for i in chr_order])
pir_files_df = | pd.DataFrame(sorted_pir_files, columns=chr_order) | pandas.DataFrame |
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
from math import floor
from termcolor import colored as cl
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (20,10)
def get_historical_data(symbol, start_date):
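# Fetch daily bars from the Twelve Data time_series endpoint; reverse the rows
# (the API lists newest first), index by datetime, cast to float, and trim to start_date.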
api_key = 'YOUR API KEY'
api_url = f'https://api.twelvedata.com/time_series?symbol={symbol}&interval=1day&outputsize=5000&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df['values']).iloc[::-1].set_index('datetime').astype(float)
df = df[df.index >= start_date]
df.index = pd.to_datetime(df.index)
return df
aapl = get_historical_data('AAPL', '2020-01-01')
aapl
def get_adx(high, low, close, lookback):
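# Wilder-style ADX: directional movement from bar-to-bar high/low differences,
# true range as the max of (high-low, |high-prev close|, |low-prev close|),
# ATR via a rolling mean, +DI/-DI as smoothed DM relative to ATR,
# DX from the DI spread, and ADX as the smoothed DX.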
plus_dm = high.diff()
minus_dm = low.diff()
plus_dm[plus_dm < 0] = 0
minus_dm[minus_dm > 0] = 0
tr1 = pd.DataFrame(high - low)
tr2 = pd.DataFrame(abs(high - close.shift(1)))
tr3 = pd.DataFrame(abs(low - close.shift(1)))
frames = [tr1, tr2, tr3]
tr = pd.concat(frames, axis = 1, join = 'inner').max(axis = 1)
atr = tr.rolling(lookback).mean()
plus_di = 100 * (plus_dm.ewm(alpha = 1/lookback).mean() / atr)
minus_di = abs(100 * (minus_dm.ewm(alpha = 1/lookback).mean() / atr))
dx = (abs(plus_di - minus_di) / abs(plus_di + minus_di)) * 100
adx = ((dx.shift(1) * (lookback - 1)) + dx) / lookback
adx_smooth = adx.ewm(alpha = 1/lookback).mean()
return plus_di, minus_di, adx_smooth
aapl['plus_di'] = pd.DataFrame(get_adx(aapl['high'], aapl['low'], aapl['close'], 14)[0]).rename(columns = {0:'plus_di'})
aapl['minus_di'] = pd.DataFrame(get_adx(aapl['high'], aapl['low'], aapl['close'], 14)[1]).rename(columns = {0:'minus_di'})
aapl['adx'] = pd.DataFrame(get_adx(aapl['high'], aapl['low'], aapl['close'], 14)[2]).rename(columns = {0:'adx'})
aapl = aapl.dropna()
aapl.tail()
ax1 = plt.subplot2grid((11,1), (0,0), rowspan = 5, colspan = 1)
ax2 = plt.subplot2grid((11,1), (6,0), rowspan = 5, colspan = 1)
ax1.plot(aapl['close'], linewidth = 2, color = '#ff9800')
ax1.set_title('AAPL CLOSING PRICE')
ax2.plot(aapl['plus_di'], color = '#26a69a', label = '+ DI 14', linewidth = 3, alpha = 0.3)
ax2.plot(aapl['minus_di'], color = '#f44336', label = '- DI 14', linewidth = 3, alpha = 0.3)
ax2.plot(aapl['adx'], color = '#2196f3', label = 'ADX 14', linewidth = 3)
ax2.axhline(25, color = 'grey', linewidth = 2, linestyle = '--')
ax2.legend()
ax2.set_title('AAPL ADX 14')
plt.show()
def implement_adx_strategy(prices, pdi, ndi, adx):
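# Signal rules: when ADX crosses above 25, buy if +DI > -DI and sell if -DI > +DI;
# the `signal` flag suppresses repeated signals in the same direction.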
buy_price = []
sell_price = []
adx_signal = []
signal = 0
for i in range(len(prices)):
if adx[i-1] < 25 and adx[i] > 25 and pdi[i] > ndi[i]:
if signal != 1:
buy_price.append(prices[i])
sell_price.append(np.nan)
signal = 1
adx_signal.append(signal)
else:
buy_price.append(np.nan)
sell_price.append(np.nan)
adx_signal.append(0)
elif adx[i-1] < 25 and adx[i] > 25 and ndi[i] > pdi[i]:
if signal != -1:
buy_price.append(np.nan)
sell_price.append(prices[i])
signal = -1
adx_signal.append(signal)
else:
buy_price.append(np.nan)
sell_price.append(np.nan)
adx_signal.append(0)
else:
buy_price.append(np.nan)
sell_price.append(np.nan)
adx_signal.append(0)
return buy_price, sell_price, adx_signal
buy_price, sell_price, adx_signal = implement_adx_strategy(aapl['close'], aapl['plus_di'], aapl['minus_di'], aapl['adx'])
ax1 = plt.subplot2grid((11,1), (0,0), rowspan = 5, colspan = 1)
ax2 = plt.subplot2grid((11,1), (6,0), rowspan = 5, colspan = 1)
ax1.plot(aapl['close'], linewidth = 3, color = '#ff9800', alpha = 0.6)
ax1.set_title('AAPL CLOSING PRICE')
ax1.plot(aapl.index, buy_price, marker = '^', color = '#26a69a', markersize = 14, linewidth = 0, label = 'BUY SIGNAL')
ax1.plot(aapl.index, sell_price, marker = 'v', color = '#f44336', markersize = 14, linewidth = 0, label = 'SELL SIGNAL')
ax2.plot(aapl['plus_di'], color = '#26a69a', label = '+ DI 14', linewidth = 3, alpha = 0.3)
ax2.plot(aapl['minus_di'], color = '#f44336', label = '- DI 14', linewidth = 3, alpha = 0.3)
ax2.plot(aapl['adx'], color = '#2196f3', label = 'ADX 14', linewidth = 3)
ax2.axhline(25, color = 'grey', linewidth = 2, linestyle = '--')
ax2.legend()
ax2.set_title('AAPL ADX 14')
plt.show()
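# Build a holding-state series from the signals: 1 after a buy, 0 after a sell,
# carrying the previous state forward on days without a signal.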
position = []
for i in range(len(adx_signal)):
if adx_signal[i] > 1:
position.append(0)
else:
position.append(1)
for i in range(len(aapl['close'])):
if adx_signal[i] == 1:
position[i] = 1
elif adx_signal[i] == -1:
position[i] = 0
else:
position[i] = position[i-1]
close_price = aapl['close']
plus_di = aapl['plus_di']
minus_di = aapl['minus_di']
adx = aapl['adx']
adx_signal = pd.DataFrame(adx_signal).rename(columns = {0:'adx_signal'}).set_index(aapl.index)
position = | pd.DataFrame(position) | pandas.DataFrame |
#!/usr/bin/env python3
import re
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import numpy as np
import pandas as pd
import pyranges as pr
from src import logger
from src.query import bam
__all__ = ["Interval", "Regions"]
tag_parser = re.compile(r"(?P<chrom>chr.{1,2}):(?P<start>\d*)-(?P<end>\d*)_?(?P<strand>[+-]?)")
#--------------------------------------------------------------------------------------------------#
class Interval:
"""Class for performing basic queries and manipulations on a single `Interval`."""
def __init__(self, chrom, start, end, strand=None, name=None):
self.chrom = chrom if chrom.startswith("chr") else f"chr{chrom}"
self.start = int(start)
self.end = int(end)
self.strand = strand
self.name = self.tag if name is None else name
self._validate()
self.is_stranded = self.strand == "+" or self.strand == "-"
# position at genomic features
self.mid = (self.start + self.end) // 2
if self.is_stranded:
self.tss = self.start if self.strand == "+" else self.end
self.tes = self.start if self.strand == "-" else self.end
def _validate(self):
"""Check validity of constructor arguments."""
assert self.end > self.start
assert self.strand in ["+", "-", ".", None]
# TODO: check bounds are within chromosome sizes
@classmethod
def load_tag(cls, tag):
parsed_tag = tag_parser.match(tag).groupdict()
parsed_tag["start"], parsed_tag["end"] = int(parsed_tag["start"]), int(parsed_tag["end"])
if parsed_tag["strand"] == "": parsed_tag["strand"] = None
return cls(**parsed_tag)
@classmethod
def load_ensg(cls, gene):
from src.load import aals
assert gene in aals.gene_coords.index
chrom, start, end, strand = aals.gene_coords.loc[gene]
return cls(chrom, start, end, strand, name=gene)
@classmethod
def load(cls, *args):
"""Lazy loading."""
if len(args) == 1 and isinstance(args[0], Interval):
return args[0]
elif len(args) == 1 and isinstance(args[0], str):
if args[0].startswith("chr"):
return cls.load_tag(args[0])
elif args[0].startswith("ENSG"):
return cls.load_ensg(args[0])
else:
raise ValueError("Could not load Interval.")
elif len(args) == 1 and isinstance(args[0], pd.Series):
return cls(**args[0], name=args[0].name)
else:
return cls(*args[0])
#----------------------------------------------------------------------------------------------------#
# Access genomic features as `Interval` objects
#----------------------------------------------------------------------------------------------------#
@property
def as_start(self):
return Interval(self.chrom, self.start, self.start+1, self.strand)
@property
def as_end(self):
return Interval(self.chrom, self.end-1, self.end, self.strand)
@property
def as_mid(self):
return Interval(self.chrom, self.mid, self.mid+1, self.strand)
@property
def as_tss(self):
return Interval(self.chrom, self.tss, self.tss+1, self.strand)
@property
def as_tes(self):
return Interval(self.chrom, self.tes-1, self.tes, self.strand)
def get_pos(self, gf):
"""Access position by string or returns default genomic feature."""
if gf == "ref": gf = "tss" if self.is_stranded else "mid"
return getattr(self, f"{gf}")
#----------------------------------------------------------------------------------------------------#
# Operations to generate new `Interval` instances relative to `self`
#----------------------------------------------------------------------------------------------------#
def widen(self, w):
return Interval(self.chrom, self.start-w, self.end+w, self.strand)
def slide(self, s, wrt_strand=None):
if self.is_stranded and s != 0 and wrt_strand is None:
raise ValueError("`wrt_strand` must be explicit if `Interval` is stranded.")
if wrt_strand and self.strand == "-": s = -s
return Interval(self.chrom, self.start+s, self.end+s, self.strand)
def transform(self, w=0, s=0, wrt_strand=None):
"""Expand the region by `window`, shift the region downstream (3' direction) by `shift`. """
return self.widen(w=w).slide(s=s, wrt_strand=wrt_strand)
#----------------------------------------------------------------------------------------------------#
# Queries
#----------------------------------------------------------------------------------------------------#
def get_genotypes(self):
from src.analysis import vcf
return vcf.query_interval(self.chrom, self.start, self.end)
def get_rna_coverages(self):
coverages = self.as_Regions().get_rna_coverages()
return coverages.iloc[0]
def get_atac_coverages(self):
coverages = self.as_Regions().get_atac_coverages()
return coverages.iloc[0]
def get_pileups(self):
# TODO
# get_pileups_in_interval
pass
#----------------------------------------------------------------------------------------------------#
# Output formats
#----------------------------------------------------------------------------------------------------#
@property
def tag(self):
return coords_to_tag(self.chrom, self.start, self.end)
def unstrand(self):
if self.is_stranded:
return Interval(self.chrom, self.start, self.end, name=self.name)
else:
return Interval(self.chrom, self.start, self.end)
def as_tuple3(self):
return self.chrom, self.start, self.end
def as_tuple(self):
return self.chrom, self.start, self.end, self.strand
def as_dict(self):
return {"chrom": self.chrom, "start": self.start, "end": self.end, "strand": self.strand}
def as_Regions(self):
interval_s = pd.Series(self.dict, name=self.tag)
return Regions(interval_s.to_frame().T)
def __repr__(self):
if self.is_stranded:
return f"{self.tag}_{self.strand}"
else:
return self.tag
def length(self):
return self.end - self.start
# class Peak(Interval):
# def __init__(self, peak_id):
# parsed_tag = tag_parser.match(peak_id).groupdict()
# chrom, start, end = parsed_tag["chrom"], int(parsed_tag["start"]), int(parsed_tag["end"])
# super().__init__(chrom, start, end)
# class Gene(Interval):
# def __init__(self, gene_id):
# coords = aals.gene_coords.loc[gene_id]
# super().__init__(**coords)
#----------------------------------------------------------------------------------------------------#
# Regions subclass
#----------------------------------------------------------------------------------------------------#
class Regions(pd.DataFrame):
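# Lazily cached plain-DataFrame (_df) and PyRanges (_pr) views of this Regions frame,
# populated on first access of the `df` and `pr` properties.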
_df,_pr = None,None
@property
def _constructor(self):
# return Regions
if "chrom" in self.columns and "start" in self.columns and "end" in self.columns:
return Regions
else:
logger.update("Not formatted as `Regions`. Falling back to dataframe.")
return pd.DataFrame
@property
def is_stranded(self):
return "strand" in self.columns
@property
def is_sorted(self):
shifted = self.shift(fill_value=0)
return ((self["start"] > shifted["start"]) | (self["chrom"] != shifted["chrom"])).all()
#----------------------------------------------------------------------------------------------------#
# Queries
#----------------------------------------------------------------------------------------------------#
def get_rna_coverages(self, max_size=10):
return bam.get_coverages_in_regions(aals.rna_bams, self)
def get_atac_coverages(self, max_size=10):
return bam.get_coverages_in_regions(aals.atac_bams, self)
#----------------------------------------------------------------------------------------------------#
# Intersect with other regions
#----------------------------------------------------------------------------------------------------#
def in_interval(self, chrom, start, end):
return self[(self["chrom"] == "chrom") & (self["end"] > start) & (self["start"] < end)]
def overlap_with(self, other):
"""Reports features that overlap with other."""
other = _format_input_as_pyranges(other)
overlap_idx = self.pr.overlap(other).__getattr__(self.index.name)
return self.reindex(overlap_idx)
def overlapping_idx(self, other, col_names=None, **kwargs):
"""Reports indices of overlapping intervals in self and other."""
return _get_overlapping_regions(self, other, col_names=col_names)
def adjacent_idx(self, hops):
"""Reports pairs indices of adjacent intervals. Distance is set by `hops`."""
assert isinstance(hops, int) and hops != 0
pos_hops = abs(hops)
chrom_vals = self["chrom"].values
chroms_1, chroms_2 = chrom_vals[:-pos_hops], chrom_vals[pos_hops:]
same_chrom = chroms_1 == chroms_2
names = self.index.name, f"{hops}_hop"
if hops > 0:
return pd.DataFrame((row[:2] for row in zip(self.index[:-pos_hops], self.index[pos_hops:], same_chrom) if row[2]), columns=names)
else:
return pd.DataFrame((row[:2] for row in zip(self.index[pos_hops:], self.index[:-pos_hops], same_chrom) if row[2]), columns=names)
def k_adjacent(self, interval, k=5, gf=None, report_distance=True):
"""Gets the k nearest intervals in either direction."""
interval = unpack_interval_arg(interval)
contig_features = self[self["chrom"] == interval.chrom]
nearest_feature = contig_features.k_nearest(interval, k=1, gf=gf, report_distance=False).index[0]
nearest_idx = np.where(contig_features.index == nearest_feature)[0][0]
lower_idx, upper_idx = max(nearest_idx-k, 0), min(nearest_idx+k, len(contig_features))
regions = contig_features.iloc[lower_idx:upper_idx]
if report_distance: regions = regions.assign(distance=self.distances_from_interval(interval, gf=gf))
return regions
def k_nearest(self, interval, k=10, gf=None, report_distance=True):
"""Gets k nearest features by absolute distance."""
interval = unpack_interval_arg(interval)
distances = self.distances_from_interval(interval, gf=gf)
regions = self.reindex(distances.abs().sort_values()[:k].index).sort()
if report_distance: regions = regions.assign(distance=self.distances_from_interval(interval, gf=gf))
return regions
def previous_feature(self, interval, n=1, gf=None, report_distance=True):
"""Gets k nearest features by absolute distance."""
interval = unpack_interval_arg(interval)
adjacent_intervals = self.k_adjacent(interval, k=1, gf=gf, report_distance=False)
return Interval.load(adjacent_intervals.iloc[0])
#----------------------------------------------------------------------------------------------------#
# Converters and I/O
#----------------------------------------------------------------------------------------------------#
@property
def annotate(self):
# if self.omic == "rna":
# return self.assign(symbol=self["gene_id"].map(data.ensg))
pass
@property
def pr(self):
if self._pr is None:
self._pr = df_to_pr(self.df)
return self._pr
def bed(self, strand_fill="."):
"""Convert `Regions` object to BED format."""
pass
@property
def df(self):
if self._df is None:
self._df = pd.DataFrame(self)
return self._df
def write_bed(self, path):
# TODO
pass
#----------------------------------------------------------------------------------------------------#
# Utility methods
#----------------------------------------------------------------------------------------------------#
@property
def tags(self):
return self["chrom"] + ":" + self["start"].astype(str) + "-" + self["end"].astype(str)
def set_index_to_tags(self, name="peak_id"):
new_regions = self.copy()
new_regions.index = self.tags
new_regions.index.name = name
return new_regions
def sort(self):
return sort_regions(self)
#----------------------------------------------------------------------------------------------------#
# Constructors
#----------------------------------------------------------------------------------------------------#
def unstrand(self):
return self.drop(columns=["strand"], errors="ignore")
def widen(self, w):
"""Expand region by w."""
new_regions = self.copy()
new_regions["start"] -= w
new_regions["end"] += w
return new_regions
def slide(self, s):
"""Slide region by s."""
new_regions = self.copy()
if self.is_stranded:
s = self["strand"].replace({"+":s, "-":-s})
new_regions["start"] += s
new_regions["end"] += s
return new_regions
def transform(self, w=0, s=0):
new_regions = self.copy()
if self.is_stranded:
s = self["strand"].replace({"+":s, "-":-s})
new_regions["start"] += s - w
new_regions["end"] += s + w
return new_regions
#----------------------------------------------------------------------------------------------------#
# Access positions
#----------------------------------------------------------------------------------------------------#
@property
def start(self):
new_regions = self.copy()
new_regions["end"] = new_regions["start"] + 1
return new_regions
@property
def end(self):
new_regions = self.copy()
new_regions["start"] = new_regions["end"] - 1
return new_regions
@property
def mid(self):
new_regions = self.copy()
new_regions["start"] = (new_regions["start"] + new_regions["end"]) // 2
new_regions["end"] = new_regions["start"]
return new_regions
@property
def tss(self):
new_regions = self.copy()
new_regions["start"] = np.where(new_regions["strand"] == "+", new_regions["start"], new_regions["end"]-1)
new_regions["end"] = new_regions["start"] + 1
return new_regions
@property
def tes(self):
new_regions = self.copy()
new_regions["start"] = np.where(new_regions["strand"] == "-", new_regions["start"], new_regions["end"]-1)
new_regions["end"] = new_regions["start"] + 1
return new_regions
@property
def start_pos(self):
return self["start"].copy()
@property
def end_pos(self):
return self["end"].copy()
@property
def mid_pos(self):
return ((self["start"] + self["end"]) // 2).rename("mid")
@property
def tss_pos(self):
tss_pos = np.where(self["strand"] == "+", self["start"], self["end"])
return pd.Series(data=tss_pos, index=self.index, name="tss")
@property
def tes_pos(self):
tes_pos = np.where(self["strand"] == "-", self["start"], self["end"])
return | pd.Series(data=tes_pos, index=self.index, name="tes") | pandas.Series |
import pandas as pd
import collections
import numpy as np
import cvxopt as opt
from cvxopt import blas, solvers
from pandas.plotting import register_matplotlib_converters
import datetime
| register_matplotlib_converters() | pandas.plotting.register_matplotlib_converters |
import os, sys, json, warnings, logging as log
import pandas as pd, tqdm, dpath
import annotate, collect
from pprint import pprint
def make_items(iter_labeled_meta, iter_all_meta, n_unlabeled, read_rows):
'''Generate metadata items from gold-standard (labeled) and unlabeled tables'''
labeled_items = [(meta, read_rows(meta['url']))
for meta in iter_labeled_meta]
annotated_table_urls = set([meta['url'] for meta, _ in labeled_items])
unlabeled_meta = []
for meta in iter_all_meta:
if (n_unlabeled is not None) and len(unlabeled_meta) >= n_unlabeled:
break
if meta['url'] not in annotated_table_urls:
unlabeled_meta.append(meta)
unlabeled_items = [(meta, read_rows(meta['url']))
for meta in unlabeled_meta]
return labeled_items, unlabeled_items
def make_labelquery(args):
querytype, template, slots, value, templates, namespace, kbdomain, name = args
return querytype, name, annotate.make_labelquery(*args)
def parallel_query(labelqueries,
templates,
namespace,
kbdomain,
max_workers=1):
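# Fan the label queries out over a multiprocessing pool and yield
# (query_type, query_name, LabelQuery) tuples as they complete.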
import tqdm, multiprocessing
with multiprocessing.Pool(max_workers) as p:
stream_args = [(q['label'], q['template'], q['slots'], q['value'],
templates, namespace, kbdomain, name)
for name, q in labelqueries.items()]
t = len(stream_args)
# yield from tqdm.tqdm(p.imap_unordered(make_labelquery, stream_args), total=t)
yield from p.imap_unordered(make_labelquery, stream_args)
def cache_labelquery_results(modeldir,
namespace,
kbdomain,
selected_queries=[],
results_fname=None,
parallel=False,
verbose=False):
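# Run the selected label queries (optionally in parallel), merge their results into the
# cached results.json under <modeldir>/labelqueries/cache, write per-query result counts
# to results.stats.json, and return the {label: {query_name: LabelQuery}} mapping.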
labelqueries, templates = annotate.load_labelqueries_templates(modeldir)
if not results_fname:
os.makedirs(os.path.join(modeldir, 'labelqueries', 'cache'),
exist_ok=True)
results_fname = os.path.join(modeldir, 'labelqueries', 'cache',
'results.json')
labelquery_results = load_labelquery_results(modeldir,
results_fname=results_fname)
l = len(labelqueries)
if parallel:
if selected_queries:
labelqueries = {
name: q
for name, q in labelqueries.items() if name in selected_queries
}
lqs = parallel_query(labelqueries,
templates,
namespace,
kbdomain,
max_workers=parallel)
for qt, name, lq in lqs:
labelquery_results.setdefault(qt, {})[name] = lq
else:
for i, (name, q) in enumerate(labelqueries.items()):
if selected_queries and (name not in selected_queries):
continue
lq = annotate.make_labelquery(q['label'],
q['template'],
q['slots'],
q['value'],
templates,
namespace,
kbdomain=kbdomain,
name=name)
if verbose:
print(len(lq.transformations), 'results')
labelquery_results.setdefault(q['label'], {})[name] = lq
with open(results_fname, 'w') as fw:
results_json = {
label: {name: vars(lq)
for name, lq in lqs.items()}
for label, lqs in labelquery_results.items()
}
json.dump(results_json, fw, indent=2)
with open(results_fname.replace('.json', '.stats.json'), 'w') as fw:
results_json = {
name: len(lq.transformations)
for label, lqs in labelquery_results.items()
for name, lq in lqs.items()
}
json.dump(results_json, fw, indent=2)
return labelquery_results
def load_labelquery_results(modeldir, results_fname=None):
typed_labelqueries = {}
if not results_fname:
os.makedirs(os.path.join(modeldir, 'labelqueries', 'cache'),
exist_ok=True)
results_fname = os.path.join(modeldir, 'labelqueries', 'cache',
'results.json')
if os.path.exists(results_fname):
typed_labelqueries = json.load(open(results_fname))
for lq_type, labelqueries in typed_labelqueries.items():
for name, lq_params in labelqueries.items():
labelqueries[name] = annotate.LabelQuery(**lq_params)
return typed_labelqueries
def transform_all(labelqueries, unlabeled_items, model, **kwargs):
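# Apply each label query's cached transformations to the matching unlabeled items and build
# lX, a deduplicated feature matrix of the transformed records (indexed by @id), together
# with L, a DataFrame aligned to lX holding one column of proposed labels per query.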
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
id_items = {m['@id']: (m, r) for m, r in unlabeled_items}
lX = []
lq_labels = {}
l = len(labelqueries)
for i, (name, lq) in enumerate(labelqueries.items()):
print(f'Transforming using query {name:>4s} [{i+1:3d}/{l:3d}] ...',
end='\r',
file=sys.stderr)
# Get corresponding metadata for query results
selected_items = [
id_items[i] for i in lq.transformations if i in id_items
]
transformed_items = tuple(
zip(*[(lq.transform(m, r, **kwargs), r)
for m, r in selected_items]))
if transformed_items:
recs = tuple(
zip(*model.__class__.make_records(*transformed_items)))
if recs:
qlX, qly = recs
qlX = pd.DataFrame.from_records(list(qlX)).set_index('@id')
lX.append(qlX)
lq_labels[name] = pd.Series(qly, index=qlX.index)
print(file=sys.stderr)
lX = pd.concat(lX).drop_duplicates().replace([pd.np.nan], 0)
L = pd.DataFrame(index=lX.index) # rows: n_labelqueries x cols: labels
for lqname, qly in lq_labels.items():
L[lqname] = qly
return lX, L
def get_query_labelings(labeled_metas, labelqueries):
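# Collect each query's proposed value per (table url, path) pair into a DataFrame
# with one column per label query.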
item_query_label = {}
for meta in labeled_metas:
for qid, lq in labelqueries.items():
for p, v in lq.transformations.get(meta['@id'], {}).items():
if v:
item_query_label.setdefault((meta['url'], p), {})[qid] = v
L = pd.DataFrame.from_dict(item_query_label, orient='index')
return L
def get_true_labelings(labeled_metas, eval_path):
item_truelabel = {}
for meta in labeled_metas:
for p, v in dpath.util.search(meta, eval_path, yielded=True):
if not meta.get('karma:isBad'):
item_truelabel[(meta['url'], p)] = v or None
return | pd.Series(item_truelabel) | pandas.Series |
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import itertools
import requests
import re
import os
import json
from altair import *
from fileops import save
from collections import Counter
data = {
"NVIDIA": {
"url": "https://en.wikipedia.org/wiki/List_of_Nvidia_graphics_processing_units"
},
"AMD": {
"url": "https://en.wikipedia.org/wiki/List_of_AMD_graphics_processing_units",
},
}
referencesAtEnd = r"(?:\s*\[\d+\])+(?:\d+,)?(?:\d+)?$"
for vendor in ["NVIDIA", "AMD"]:
# requests.get handles https
html = requests.get(data[vendor]["url"]).text
# oddly, some dates look like:
# <td><span class="sortkey" style="display:none;speak:none">000000002010-02-25-0000</span><span style="white-space:nowrap">Feb 25, 2010</span></td>
html = re.sub(r'<span [^>]*style="display:none[^>]*>([^<]+)</span>', "", html)
html = re.sub(r"<span[^>]*>([^<]+)</span>", r"\1", html)
# someone writes "1234" as "1 234", sigh
html = re.sub(r"(\d) (\d)", r"\1\2", html)
html = re.sub(r" ", "", html) # delete thin space (thousands sep)
html = re.sub(r" ", "", html) # delete thin space (thousands sep)
html = re.sub("\xa0", " ", html) # non-breaking space -> ' '
html = re.sub(r" ", " ", html) # non-breaking space -> ' '
html = re.sub(r" ", " ", html) # non-breaking space -> ' '
html = re.sub(r"<br />", " ", html) # breaking space -> ' '
html = re.sub("\u2012", "-", html) # figure dash -> '-'
html = re.sub("\u2013", "-", html) # en-dash -> '-'
html = re.sub(r"mm<sup>2</sup>", "mm2", html) # mm^2 -> mm2
html = re.sub("\u00d710<sup>6</sup>", "\u00d7106", html) # 10^6 -> 106
html = re.sub("\u00d710<sup>9</sup>", "\u00d7109", html) # 10^9 -> 109
html = re.sub(r"<sup>\d+</sup>", "", html) # delete footnotes
# with open('/tmp/%s.html' % vendor, 'wb') as f:
# f.write(html.encode('utf8'))
dfs = pd.read_html(
html, match=re.compile("Launch|Release Date & Price"), parse_dates=True
)
for idx, df in enumerate(dfs):
# Multi-index to index
# column names that are duplicated should be unduplicated
# 'Launch Launch' -> 'Launch'
# TODO do this
# ' '.join(a for a, b in itertools.zip_longest(my_list, my_list[1:]) if a != b)`
# print(df.columns.values)
df.columns = [
" ".join(
a
for a, b in itertools.zip_longest(col, col[1:])
if (a != b and not a.startswith("Unnamed: "))
).strip()
for col in df.columns.values
]
# df.columns = [' '.join(col).strip() for col in df.columns.values]
# TODO this next one disappears
# Get rid of 'Unnamed' column names
# df.columns = [re.sub(' Unnamed: [0-9]+_level_[0-9]+', '', col)
# for col in df.columns.values]
# If a column-name word ends in a number or number,comma,number, delete
# it
df.columns = [
" ".join([re.sub("[\d,]+$", "", word) for word in col.split()])
for col in df.columns.values
]
# If a column-name word ends with one or more '[x]',
# where 'x' is an upper- or lower-case letter or number, delete it
df.columns = [
" ".join(
[re.sub(r"(?:\[[a-zA-Z0-9]+\])+$", "", word) for word in col.split()]
)
for col in df.columns.values
]
# Get rid of hyphenation in column names
df.columns = [col.replace("- ", "") for col in df.columns.values]
# Get rid of space after slash in column names
df.columns = [col.replace("/ ", "/") for col in df.columns.values]
# Get rid of trailing space in column names
df.columns = df.columns.str.strip()
df["Vendor"] = vendor
if ("Launch" not in df.columns.values) and (
"Release Date & Price" in df.columns.values
):
# take everything up to ####
df["Launch"] = df["Release Date & Price"].str.extract(
r"^(.*\d\d\d\d)", expand=False
)
df["Release Price (USD)"] = df["Release Date & Price"].str.extract(
r"\$([\d,]+)", expand=False
)
# make sure Launch is a string (dtype=object) before parsing it
df["Launch"] = df["Launch"].apply(lambda x: str(x))
df["Launch"] = df["Launch"].str.replace(referencesAtEnd, "")
df["Launch"] = df["Launch"].apply(
lambda x: pd.to_datetime(x, infer_datetime_format=True, errors="coerce")
)
# if we have duplicate column names, we will see them here
# pd.concat will fail if so
if [c for c in Counter(df.columns).items() if c[1] > 1]:
# so remove them
# https://stackoverflow.com/questions/14984119/python-pandas-remove-duplicate-columns
df = df.loc[:, ~df.columns.duplicated()]
# this assignment is because I don't have a way to do the above line
# in-place
dfs[idx] = df
data[vendor]["dfs"] = dfs
df = pd.concat(
data["NVIDIA"]["dfs"] + data["AMD"]["dfs"], sort=False, ignore_index=True
)
# print all columns
# print(list(df))
def merge(df, dst, src, replaceNoWithNaN=False, delete=True):
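# Fill NaNs in column `dst` with values from `src` (optionally treating "No" as NaN first),
# then drop `src` unless delete=False.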
if replaceNoWithNaN:
df[src] = df[src].replace("No", np.nan)
df[dst] = df[dst].fillna(df[src])
if delete:
df.drop(src, axis=1, inplace=True)
return df
# merge related columns
df = merge(df, "Model", "Model Units")
df = merge(
df, "Processing power (GFLOPS) Single precision", "Processing power (GFLOPS)"
)
df = merge(
df,
"Processing power (GFLOPS) Single precision",
"Processing power (GFLOPS) Single precision (MAD+MUL)",
replaceNoWithNaN=True,
)
df = merge(
df,
"Processing power (GFLOPS) Single precision",
"Processing power (GFLOPS) Single precision (MAD or FMA)",
replaceNoWithNaN=True,
)
df = merge(
df,
"Processing power (GFLOPS) Double precision",
"Processing power (GFLOPS) Double precision (FMA)",
replaceNoWithNaN=True,
)
df = merge(df, "Memory Bandwidth (GB/s)", "Memory configuration Bandwidth (GB/s)")
df = merge(df, "TDP (Watts)", "TDP (Watts) Max.")
df = merge(df, "TDP (Watts)", "TBP (W)")
# get only the number out of TBP
# TODO this doesn't work - these numbers don't appear
df["TBP"] = df["TBP"].str.extract(r"([\d]+)", expand=False)
df = merge(df, "TDP (Watts)", "TBP")
df = merge(df, "TDP (Watts)", "TBP (Watts)")
# fix up watts?
# df['TDP (Watts)'] = df['TDP (Watts)'].str.extract(r'<([\d\.]+)', expand=False)
df = merge(df, "Model", "Model (Codename)")
df = merge(df, "Model", "Model (codename)")
# df = merge(df, 'Model', 'Chip (Device)')
# replace when AMD page updated
df = merge(df, "Model", "Model: Mobility Radeon")
df = merge(df, "Core clock (MHz)", "Clock rate Base (MHz)")
df = merge(df, "Core clock (MHz)", "Clock speeds Base core clock (MHz)")
df = merge(df, "Core clock (MHz)", "Core Clock (MHz)")
df = merge(df, "Core clock (MHz)", "Clock rate Core (MHz)")
df = merge(df, "Core clock (MHz)", "Clock speed Core (MHz)")
df = merge(df, "Core clock (MHz)", "Clock speed Average (MHz)")
df = merge(df, "Core clock (MHz)", "Core Clock rate (MHz)")
df = merge(df, "Core clock (MHz)", "Clock rate (MHz) Core (MHz)")
df = merge(df, "Core config", "Core Config")
df = merge(df, "Transistors Die Size", "Transistors & Die Size")
df = merge(df, "Memory Bus type", "Memory RAM type")
df = merge(df, "Memory Bus type", "Memory Type")
df = merge(df, "Memory Bus type", "Memory configuration DRAM type")
df = merge(df, "Memory Bus width (bit)", "Memory configuration Bus width (bit)")
df = merge(df, "Release Price (USD)", "Release price (USD)")
df = merge(df, "Release Price (USD)", "Release price (USD) MSRP")
# filter out {Chips, Code name, Core config}: '^[2-9]\u00d7'
df = df[~df["Chips"].str.contains(r"^[2-9]\u00d7", re.UNICODE, na=False)]
df = df[~df["Code name"].str.contains(r"^[2-9]\u00d7", re.UNICODE, na=False)]
df = df[~df["Core config"].str.contains(r"^[2-9]\u00d7", re.UNICODE, na=False)]
# filter out if Model ends in [xX]2
df = df[~df["Model"].str.contains("[xX]2$", na=False)]
# filter out {transistors, die size} that end in x2
df = df[
~df["Transistors (million)"].str.contains(r"\u00d7[2-9]$", re.UNICODE, na=False)
]
df = df[~df["Die size (mm2)"].str.contains(r"\u00d7[2-9]$", re.UNICODE, na=False)]
for prec in ["Single", "Double", "Half"]:
col = f"Processing power (TFLOPS) {prec} precision"
destcol = f"Processing power (GFLOPS) {prec} precision"
df[col] = df[col].astype(str)
df[col] = df[col].str.replace(",", "") # get rid of commas
df[col] = df[col].str.extract(r"^([\d\.]+)", expand=False)
df[col] = pd.to_numeric(df[col]) * 1000.0 # change to GFLOPS
df = merge(df, destcol, col)
# merge GFLOPS columns with "Boost" column headers and rename
for prec in ["Single", "Double", "Half"]: # single before others for '1/16 SP'
col = "Processing power (GFLOPS) %s precision" % prec
spcol = "%s-precision GFLOPS" % "Single"
# if prec != 'Half':
# df = merge(
# df, col, 'Processing power (GFLOPS) %s precision Base Core (Base Boost) (Max Boost 2.0)' % prec)
for (
srccol
) in [ # 'Processing power (GFLOPS) %s precision Base Core (Base Boost) (Max Boost 3.0)',
# 'Processing power (GFLOPS) %s precision R/F.E Base Core Reference (Base Boost) F.E. (Base Boost) R/F.E. (Max Boost 4.0)',
"Processing power (GFLOPS) %s"
]:
df = merge(df, col, srccol % prec)
# handle weird references to single-precision column
if prec != "Single":
df.loc[df[col] == "1/16 SP", col] = pd.to_numeric(df[spcol]) / 16
df.loc[df[col] == "2x SP", col] = pd.to_numeric(df[spcol]) * 2
# pick the first number we see as the actual number
df[col] = df[col].astype(str)
df[col] = df[col].str.replace(",", "") # get rid of commas
df[col] = df[col].str.extract(r"^([\d\.]+)", expand=False)
# convert TFLOPS to GFLOPS
# tomerge = 'Processing power (TFLOPS) %s Prec.' % prec
# df[col] = df[col].fillna(
# pd.to_numeric(df[tomerge].str.split(' ').str[0], errors='coerce') * 1000.0)
# df.drop(tomerge, axis=1, inplace=True)
df = df.rename(columns={col: "%s-precision GFLOPS" % prec})
# split out 'transistors die size'
# example: u'292\u00d7106 59 mm2'
for exponent in ["\u00d7106", "\u00d7109", "B"]:
dftds = df["Transistors Die Size"].str.extract(
"^([\d\.]+)%s (\d+) mm2" % exponent, expand=True
)
if exponent == "\u00d7106":
df["Transistors (million)"] = df["Transistors (million)"].fillna(
pd.to_numeric(dftds[0], errors="coerce")
)
if exponent == "\u00d7109" or exponent == "B":
df["Transistors (billion)"] = df["Transistors (billion)"].fillna(
pd.to_numeric(dftds[0], errors="coerce")
)
df["Die size (mm2)"] = df["Die size (mm2)"].fillna(
| pd.to_numeric(dftds[1], errors="coerce") | pandas.to_numeric |
from sqlalchemy import create_engine
import pandas as pd
import os
from dotenv import load_dotenv
import numpy as np
import re
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
import json
import missingno as msno
class Marketplace():
"""
Clean provided dataset and prepare for ML
Process:
- Check for existing downloaded data
- download if not present
- import from csv if present
- Remove N/A records
- Split Category and Product_name data
- store each subsection in its own column
- Remove currency symbols
- Replace categories with numbers
- Get longitude and latitude from location text
- Export new data to csv and categories to json
Args:
- None
Returns:
- None
"""
def __init__(self):
"""
Initialise new Marketplace object
"""
pass
def not_already_downloaded(self) -> bool:
"""
Checks for presence of csv file of already processed data
Args:
- None
Returns:
- Bool: True if no csv is present
"""
if os.path.exists('data/cleaned.csv'):
print("Existing data found")
return False
else:
print("No csv present")
return True
def load_all_existing_data_to_dfs(self):
"""
If csv information available, loads it to num_df
Args:
- None
Returns:
- None
"""
self.num_df = pd.read_csv(
'data/cleaned.csv',
header=0,
delimiter=",",
engine='python')
with open('data/category.json', 'r') as jf:
self.cat_headings = dict(json.load(jf))
with open('data/minor_category.json', 'r') as jf:
self.minor_cat_headings = dict(json.load(jf))
with open('data/sub_category.json', 'r') as jf:
self.sub_cat_headings = dict(json.load(jf))
def connect_to_RDS_engine(self):
"""
When there's no csv, we go online to get the data.
Args:
- None
Returns:
- None
"""
load_dotenv()
DATABASE_TYPE = os.environ.get('DATABASE_TYPE')
DBAPI = os.environ.get('DBAPI')
ENDPOINT = os.environ.get('ENDPOINT')
DBUSER = os.environ.get('DBUSER')
DBPASSWORD = os.environ.get('DBPASSWORD')
PORT = 5432
DATABASE = os.environ.get('DATABASE')
engine = create_engine(f"{DATABASE_TYPE}+{DBAPI}://{DBUSER}:"
f"{DBPASSWORD}@{ENDPOINT}:"
f"{PORT}/{DATABASE}")
engine.connect()
main_df = pd.read_sql_table(
'products', self.engine,
columns=["id", "product_name", "category", "product_description",
"price", "location",
"page_id", "create_time"])
return main_df
def remove_n_a_records(self, column: str):
"""
Scan the column for records with all N/As. Get rid of them
Args:
column (str): The column currently being scanned.
"""
# Swap N/A for the pandas nan, so we can drop them
temp_df = self.main_df[column].replace('N/A', np.nan)
temp_df = temp_df.dropna()
# Create a new df with only the records without the nans
clean_df = pd.merge(temp_df, self.main_df,
left_index=True, right_index=True)
# The merge creates a duplicate column. Remove it.
clean_df.drop(column + '_x', inplace=True, axis=1)
# Rename the remaining category column
clean_df.rename(columns={column + '_y': column}, inplace=True)
# Commit the cleansed data to the dataframe
self.main_df = clean_df
def split_heirarchies(self, col: str, character: str, no_cols: int):
"""
Takes in a column name and splits data to columns based on sep. char.
Args:
col (str): name of the column to split
character (str): separator character to split on
no_cols (int): number of columns to split the data into
"""
self.main_df[[col+str(i) for i in range(no_cols)]] = (
self.main_df[col].str.split(character, expand=True))
self.main_df = self.main_df.drop(col, axis=1)
if col == 'category':
for i in range(no_cols):
if i > 2:
self.main_df = self.main_df.drop(col+str(i), axis=1)
def clean_columns(self, num: int):
"""
Removes unnecessary columns generated by split_heirarchies func.
Renames product_name and category columns accordingly
Args:
num (int): The number of columns to keep
"""
# categories - remove anything after category_2
cols = [('product_name' + str(i)) for i in range(1, (num))]
for column in cols:
self.main_df = self.main_df.drop(column, axis=1)
self.main_df = self.main_df.rename(
columns={'product_name0': 'product_name',
'category0': 'category',
'category1': 'sub_category',
'category2': 'minor_category'})
self.main_df['category'] = self.main_df['category'].apply(
lambda x: x.strip(' |') if not(pd.isnull(x)) else x)
self.main_df['sub_category'] = self.main_df['sub_category'].apply(
lambda x: x.strip(' |') if not(pd.isnull(x)) else x)
self.main_df['minor_category'] = self.main_df['minor_category'].apply(
lambda x: x.strip(' |') if not(pd.isnull(x)) else x)
self.main_df['product_name'] = self.main_df['product_name'].apply(
lambda x: x.strip(' |') if not( | pd.isnull(x) | pandas.isnull |
from io import StringIO
import subprocess
import pandas as pd
import os
# Time columns in job records
# If we exclude PENDING jobs (that we do in slurm_raw_processing), all time columns should have a time stamp,
# except RUNNING jobs that do not have the 'End' stamp.
time_columns = ['Eligible','Submit','Start','End']
# Define what constitutes a duplicate job
duplicate_job_def = ['JobID','Submit','Start']
def sacct_jobs(account_query, d_from, d_to='', debugging=False,
serialize_frame='', slurm_names=False):
"""Ingest job record information from slurm via sacct and return DataFrame.
Parameters
-------
account_query: str
String query to be sent to sacct via -A flag.
d_from: date str
Beginning of the query period, e.g. '2019-04-01T00:00:00'.
d_to: date str, optional
End of the query period; used to stamp an end time for still-running jobs.
Defaults to the empty string.
debugging: boolean, optional
Boolean for reporting progress to stdout. Default False.
serialize_frame: str, optional
Pickle the resulting DataFrame.
If empty, pickling is skipped. Defaults to the empty string.
slurm_names: str, optional
Keep slurm's sacct column names instead of shorthands.
Defaults to False.
Returns
-------
DataFrame
Returns a standard pandas DataFrame, or an empty dataframe if no
jobs are found.
"""
raw_frame = _get_slurm_records(pd.to_datetime(d_from))
out_frame = _slurm_raw_processing(raw_frame, slurm_names)
# Legacy/consistency check:
# Protect end time for jobs that are still currently running
out_frame['end'] = out_frame['end'].replace({pd.NaT: | pd.to_datetime(d_to) | pandas.to_datetime |
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import os
import tempfile
import unittest
# noinspection PyPackageRequirements
import pytest
from pandas.tests.extension import base
from text_extensions_for_pandas.array.test_span import ArrayTestBase
from text_extensions_for_pandas.array.span import *
from text_extensions_for_pandas.array.token_span import *
class TokenSpanTest(ArrayTestBase):
def test_create(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 1)
self.assertEqual(s1.covered_text, "This")
# Begin too small
with self.assertRaises(ValueError):
TokenSpan(toks, -2, 4)
# End too small
with self.assertRaises(ValueError):
TokenSpan(toks, 1, -1)
# End too big
with self.assertRaises(ValueError):
TokenSpan(toks, 1, 10)
# Begin null, end not null
with self.assertRaises(ValueError):
TokenSpan(toks, TokenSpan.NULL_OFFSET_VALUE, 0)
def test_repr(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 2)
self.assertEqual(repr(s1), "[0, 7): 'This is'")
toks2 = SpanArray(
"This is a really really really really really really really really "
"really long string.",
np.array([0, 5, 8, 10, 17, 24, 31, 38, 45, 52, 59, 66, 73, 78, 84]),
np.array([4, 7, 9, 16, 23, 30, 37, 44, 51, 58, 65, 72, 77, 84, 85]),
)
self._assertArrayEquals(
toks2.covered_text,
[
"This",
"is",
"a",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"really",
"long",
"string",
".",
],
)
s2 = TokenSpan(toks2, 0, 4)
self.assertEqual(repr(s2), "[0, 16): 'This is a really'")
s2 = TokenSpan(toks2, 0, 15)
self.assertEqual(
repr(s2),
"[0, 85): 'This is a really really really really really really "
"really really really [...]'"
)
def test_equals(self):
toks = self._make_spans_of_tokens()
other_toks = toks[:-1].copy()
s1 = TokenSpan(toks, 0, 2)
s2 = TokenSpan(toks, 0, 2)
s3 = TokenSpan(toks, 0, 3)
s4 = TokenSpan(other_toks, 0, 2)
s5 = Span(toks.target_text, s4.begin, s4.end)
s6 = Span(toks.target_text, s4.begin, s4.end + 1)
self.assertEqual(s1, s2)
self.assertNotEqual(s1, s3)
self.assertEqual(s1, s4)
self.assertEqual(s1, s5)
self.assertEqual(s5, s1)
self.assertNotEqual(s1, s6)
def test_less_than(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
self.assertLess(s1, s3)
self.assertLessEqual(s1, s3)
self.assertFalse(s1 < s2)
def test_add(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
char_s1 = Span(s1.target_text, s1.begin, s1.end)
char_s2 = Span(s2.target_text, s2.begin, s2.end)
self.assertEqual(s1 + s2, s1)
self.assertEqual(char_s1 + s2, char_s1)
self.assertEqual(s2 + char_s1, char_s1)
self.assertEqual(char_s2 + char_s1, char_s1)
self.assertEqual(s2 + s3, TokenSpan(toks, 2, 4))
def test_hash(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 0, 3)
s3 = TokenSpan(toks, 3, 4)
d = {s1: "foo"}
self.assertEqual(d[s1], "foo")
self.assertEqual(d[s2], "foo")
d[s2] = "bar"
d[s3] = "fab"
self.assertEqual(d[s1], "bar")
self.assertEqual(d[s2], "bar")
self.assertEqual(d[s3], "fab")
class TokenSpanArrayTest(ArrayTestBase):
def _make_spans(self):
toks = self._make_spans_of_tokens()
return TokenSpanArray(toks, [0, 1, 2, 3, 0, 2, 0], [1, 2, 3, 4, 2, 4, 4])
def test_create(self):
arr = self._make_spans()
self._assertArrayEquals(
arr.covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
with self.assertRaises(TypeError):
TokenSpanArray(self._make_spans_of_tokens(), "Not a valid begins list", [42])
def test_dtype(self):
arr = self._make_spans()
self.assertTrue(isinstance(arr.dtype, TokenSpanDtype))
def test_len(self):
self.assertEqual(len(self._make_spans()), 7)
def test_getitem(self):
arr = self._make_spans()
self.assertEqual(arr[2].covered_text, "a")
self._assertArrayEquals(arr[2:4].covered_text, ["a", "test"])
def test_setitem(self):
arr = self._make_spans()
arr[1] = arr[2]
self._assertArrayEquals(arr.covered_text[0:4], ["This", "a", "a", "test"])
arr[3] = None
self._assertArrayEquals(arr.covered_text[0:4], ["This", "a", "a", None])
with self.assertRaises(ValueError):
arr[0] = "Invalid argument for __setitem__()"
arr[0:2] = arr[0]
self._assertArrayEquals(arr.covered_text[0:4], ["This", "This", "a", None])
arr[[0, 1, 3]] = None
self._assertArrayEquals(arr.covered_text[0:4], [None, None, "a", None])
arr[[2, 1, 3]] = arr[[4, 5, 6]]
self._assertArrayEquals(
arr.covered_text[0:4], [None, "a test", "This is", "This is a test"]
)
def test_equals(self):
arr = self._make_spans()
self._assertArrayEquals(arr[0:4] == arr[1], [False, True, False, False])
arr2 = self._make_spans()
self._assertArrayEquals(arr == arr, [True] * 7)
self._assertArrayEquals(arr == arr2, [True] * 7)
self._assertArrayEquals(arr[0:3] == arr[3:6], [False, False, False])
arr3 = SpanArray(arr.target_text, arr.begin, arr.end)
self._assertArrayEquals(arr == arr3, [True] * 7)
self._assertArrayEquals(arr3 == arr, [True] * 7)
def test_not_equals(self):
arr = self._make_spans()
arr2 = self._make_spans()
self._assertArrayEquals(arr[0:4] != arr[1], [True, False, True, True])
self._assertArrayEquals(arr != arr2, [False] * 7)
self._assertArrayEquals(arr[0:3] != arr[3:6], [True, True, True])
def test_concat_same_type(self):
arr = self._make_spans()
arr2 = self._make_spans()
# Type: TokenSpanArray
arr3 = TokenSpanArray._concat_same_type((arr, arr2))
self._assertArrayEquals(arr3.covered_text, np.tile(arr2.covered_text, 2))
def test_from_factorized(self):
arr = self._make_spans()
spans_list = [arr[i] for i in range(len(arr))]
arr2 = TokenSpanArray._from_factorized(spans_list, arr)
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
def test_from_sequence(self):
arr = self._make_spans()
spans_list = [arr[i] for i in range(len(arr))]
arr2 = TokenSpanArray._from_sequence(spans_list)
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
def test_nulls(self):
arr = self._make_spans()
self._assertArrayEquals(arr.isna(), [False] * 7)
self.assertFalse(arr.have_nulls)
arr[2] = TokenSpan.make_null(arr.tokens)
self.assertIsNone(arr.covered_text[2])
self._assertArrayEquals(arr[0:4].covered_text, ["This", "is", None, "test"])
self._assertArrayEquals(arr[0:4].isna(), [False, False, True, False])
self.assertTrue(arr.have_nulls)
def test_copy(self):
arr = self._make_spans()
arr2 = arr.copy()
self._assertArrayEquals(arr.covered_text, arr2.covered_text)
self.assertEqual(arr[1], arr2[1])
arr[1] = TokenSpan.make_null(arr.tokens)
self.assertNotEqual(arr[1], arr2[1])
    # Exercises take(), including allow_fill behaviour.
def test_take(self):
arr = self._make_spans()
arr2 = arr.take([1, 1, 2, 3, 5, -1])
self._assertArrayEquals(
arr2.covered_text, ["is", "is", "a", "test", "a test", "This is a test"]
)
arr3 = arr.take([1, 1, 2, 3, 5, -1], allow_fill=True)
self._assertArrayEquals(
arr3.covered_text, ["is", "is", "a", "test", "a test", None]
)
def test_less_than(self):
tokens = self._make_spans_of_tokens()
arr1 = TokenSpanArray(tokens, [0, 2], [4, 3])
s1 = TokenSpan(tokens, 0, 1)
s2 = TokenSpan(tokens, 3, 4)
arr2 = TokenSpanArray(tokens, [0, 3], [0, 4])
self._assertArrayEquals(s1 < arr1, [False, True])
self._assertArrayEquals(s2 > arr1, [False, True])
self._assertArrayEquals(arr1 < s1, [False, False])
self._assertArrayEquals(arr1 < arr2, [False, True])
def test_add(self):
toks = self._make_spans_of_tokens()
s1 = TokenSpan(toks, 0, 3)
s2 = TokenSpan(toks, 2, 3)
s3 = TokenSpan(toks, 3, 4)
s4 = TokenSpan(toks, 2, 4)
s5 = TokenSpan(toks, 0, 3)
char_s1 = Span(s1.target_text, s1.begin, s1.end)
char_s2 = Span(s2.target_text, s2.begin, s2.end)
char_s3 = Span(s3.target_text, s3.begin, s3.end)
char_s4 = Span(s4.target_text, s4.begin, s4.end)
char_s5 = Span(s5.target_text, s5.begin, s5.end)
# TokenSpanArray + TokenSpanArray
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3])
+ TokenSpanArray._from_sequence([s2, s3, s3]),
TokenSpanArray._from_sequence([s1, s4, s3]),
)
# SpanArray + TokenSpanArray
self._assertArrayEquals(
SpanArray._from_sequence([char_s1, char_s2, char_s3])
+ TokenSpanArray._from_sequence([s2, s3, s3]),
SpanArray._from_sequence([char_s1, char_s4, char_s3]),
)
# TokenSpanArray + SpanArray
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3])
+ SpanArray._from_sequence([char_s2, char_s3, char_s3]),
SpanArray._from_sequence([char_s1, char_s4, char_s3]),
)
# TokenSpanArray + TokenSpan
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3]) + s2,
TokenSpanArray._from_sequence([s5, s2, s4]),
)
# TokenSpan + TokenSpanArray
self._assertArrayEquals(
s2 + TokenSpanArray._from_sequence([s1, s2, s3]),
TokenSpanArray._from_sequence([s5, s2, s4]),
)
# TokenSpanArray + Span
self._assertArrayEquals(
TokenSpanArray._from_sequence([s1, s2, s3]) + char_s2,
SpanArray._from_sequence([char_s5, char_s2, char_s4]),
)
# Span + SpanArray
self._assertArrayEquals(
char_s2 + SpanArray._from_sequence([char_s1, char_s2, char_s3]),
SpanArray._from_sequence([char_s5, char_s2, char_s4]),
)
def test_reduce(self):
arr = self._make_spans()
self.assertEqual(arr._reduce("sum"), TokenSpan(arr.tokens, 0, 4))
# Remind ourselves to modify this test after implementing min and max
with self.assertRaises(TypeError):
arr._reduce("min")
def test_make_array(self):
arr = self._make_spans()
arr_series = pd.Series(arr)
toks_list = [arr[0], arr[1], arr[2], arr[3]]
self._assertArrayEquals(
TokenSpanArray.make_array(arr).covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
self._assertArrayEquals(
TokenSpanArray.make_array(arr_series).covered_text,
["This", "is", "a", "test", "This is", "a test", "This is a test"],
)
self._assertArrayEquals(
TokenSpanArray.make_array(toks_list).covered_text,
["This", "is", "a", "test"],
)
def test_begin_and_end(self):
arr = self._make_spans()
self._assertArrayEquals(arr.begin, [0, 5, 8, 10, 0, 8, 0])
self._assertArrayEquals(arr.end, [4, 7, 9, 14, 7, 14, 14])
def test_normalized_covered_text(self):
arr = self._make_spans()
self._assertArrayEquals(
arr.normalized_covered_text,
["this", "is", "a", "test", "this is", "a test", "this is a test"],
)
def test_as_frame(self):
arr = self._make_spans()
df = arr.as_frame()
self._assertArrayEquals(
df.columns, ["begin", "end", "begin_token", "end_token", "covered_text"]
)
self.assertEqual(len(df), len(arr))
class TokenSpanArrayIOTests(ArrayTestBase):
def do_roundtrip(self, df):
with tempfile.TemporaryDirectory() as dirpath:
filename = os.path.join(dirpath, 'token_span_array_test.feather')
df.to_feather(filename)
df_read = pd.read_feather(filename)
pd.testing.assert_frame_equal(df, df_read)
def test_feather(self):
toks = self._make_spans_of_tokens()
# Equal token spans to tokens
ts1 = TokenSpanArray(toks, np.arange(len(toks)), np.arange(len(toks)) + 1)
df1 = pd.DataFrame({"ts1": ts1})
self.do_roundtrip(df1)
# More token spans than tokens
ts2 = TokenSpanArray(toks, [0, 1, 2, 3, 0, 2, 0], [1, 2, 3, 4, 2, 4, 4])
        df2 = pd.DataFrame({"ts2": ts2})
        self.do_roundtrip(df2)
import os
import sys
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
data = pd.read_csv("insurance.csv", usecols=["age", "sex", "bmi", "children", "smoker", "charges"])
data['smoker'] = data['smoker'].replace(to_replace = "yes", value = 1)
data['smoker'] = data['smoker'].replace(to_replace = "no", value = 0)
data['sex'] = data['sex'].replace("male", value = 1)
data['sex'] = data['sex'].replace("female", value = 0)
data = data.join(pd.get_dummies(data['sex']))
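# A minimal continuation sketch using the imports above (assumes 'charges' is the regression
# target; the split size and alpha are arbitrary choices, not taken from the original script):
# X = data.drop(columns=['charges'])
# y = data['charges']
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# ridge = Ridge(alpha=1.0).fit(X_train, y_train)
# print(ridge.score(X_test, y_test))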
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# System libs
import json
import numpy
import pandas
from pandas.io.json import json_normalize
import plotly.graph_objs as go
import plotly.offline as plotly
def load_data(path):
with open(path, 'r') as f:
data = json.load(f)
return json_normalize(data)
def prepare(path, times, is_pref):
data = load_data(path)
data = data.loc[(data['args.use_p_ref'] == is_pref) & (data['discr_order'] == 1) & (data['solver_info.final_res_norm'] < 1e-10) & (data['num_flipped'] == 0) & (data['solver_info.num_iterations'] > 0) & (data['solver_info.num_iterations'] < 1000)]
data = data[["args.mesh", "time_assembling_stiffness_mat", "time_assigning_rhs", "time_building_basis", "time_solving", "num_vertices"]]
data["args.mesh"] = data["args.mesh"].str.replace("/scratch/yh1998/polyfem300_mesh/", "")
data["args.mesh"] = data["args.mesh"].str.replace("/scratch/yh1998/polyfem30k/", "")
data["args.mesh"] = data["args.mesh"].str.replace("/beegfs/work/panozzo/p_ref/tetmesh_mesh/", "")
data["args.mesh"] = data["args.mesh"].str.replace("/beegfs/work/panozzo/p_ref/polyfem300_mesh/", "")
data["args.mesh"] = data["args.mesh"].str.replace("_tetmesh.msh.mesh", "")
data["args.mesh"] = data["args.mesh"].str.replace("_polyfem300.msh.mesh", "")
    data = pandas.merge(data, times, left_on="args.mesh", right_on="mesh_id")
# -*- coding: utf-8 -*-
"""
This module is EXPERIMENTAL, that means that tests are missing.
The reason is that the coastdat2 dataset is deprecated and will be replaced by
the OpenFred dataset from Helmholtz-Zentrum Geesthacht. It should work though.
This module is designed for the use with the coastdat2 weather data set
of the Helmholtz-Zentrum Geesthacht.
A description of the coastdat2 data set can be found here:
https://www.earth-syst-sci-data.net/6/147/2014/
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
# Python libraries
import os
import datetime
import logging
from collections import namedtuple
import calendar
# External libraries
import requests
import pandas as pd
import pvlib
from shapely.geometry import Point
from windpowerlib.wind_turbine import WindTurbine
# Internal modules
from reegis import tools
from reegis import feedin
from reegis import config as cfg
from reegis import powerplants as powerplants
from reegis import geometries
from reegis import bmwi
def download_coastdat_data(filename=None, year=None, url=None,
test_only=False, overwrite=True):
"""
Download coastdat data set from internet source.
Parameters
----------
filename : str
Full path with the filename, where the downloaded file will be stored.
year : int or None
Year of the weather data set. If a url is passed this value will be
ignored because it is used to create the default url.
url : str or None
        Own url can be used if the default url does not work and one has found
        an alternative valid url.
    test_only : bool
        If True the url is tested but the file will not be downloaded
(default: False).
overwrite : bool
If True the file will be downloaded even if it already exist.
(default: True)
Returns
-------
str or None : If the url is valid the filename is returned otherwise None.
Examples
--------
>>> download_coastdat_data(year=2014, test_only=True)
'coastDat2_de_2014.h5'
>>> print(download_coastdat_data(url='https://osf.io/url', test_only=True))
None
>>> download_coastdat_data(filename='w14.hd5', year=2014) # doctest: +SKIP
"""
if url is None:
url_ids = cfg.get_dict("coastdat_url_id")
url_id = url_ids.get(str(year), None)
if url_id is not None:
url = cfg.get("coastdat", "basic_url").format(url_id=url_id)
if url is not None and not test_only:
response = requests.get(url, stream=True)
if response.status_code == 200:
msg = "Downloading the coastdat2 file of {0} from {1} ..."
logging.info(msg.format(year, url))
if filename is None:
headers = response.headers["Content-Disposition"]
filename = (
headers.split("; ")[1].split("=")[1].replace('"', "")
)
tools.download_file(filename, url, overwrite=overwrite)
return filename
else:
raise ValueError("URL not valid: {0}".format(url))
elif url is not None and test_only:
response = requests.get(url, stream=True)
if response.status_code == 200:
headers = response.headers["Content-Disposition"]
filename = headers.split("; ")[1].split("=")[1].replace('"', "")
else:
filename = None
return filename
else:
raise ValueError("No URL found for {0}".format(year))
def fetch_id_by_coordinates(latitude, longitude):
"""
Get nearest weather data set to a given location.
Parameters
----------
latitude : float
longitude : float
Returns
-------
int : coastdat id
Examples
--------
>>> fetch_id_by_coordinates(53.655119, 11.181475)
1132101
"""
coastdat_polygons = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
location = Point(longitude, latitude)
cid = coastdat_polygons[coastdat_polygons.contains(location)].index
if len(cid) == 0:
msg = "No id found for latitude {0} and longitude {1}."
logging.warning(msg.format(latitude, longitude))
return None
elif len(cid) == 1:
return cid[0]
def fetch_data_coordinates_by_id(coastdat_id):
"""
Returns the coordinates of the weather data set.
Parameters
----------
coastdat_id : int or str
ID of the coastdat weather data set
Returns
-------
namedtuple : Fields are latitude and longitude
Examples
--------
>>> location=fetch_data_coordinates_by_id(1132101)
>>> round(location.latitude, 3)
53.692
>>> round(location.longitude, 3)
11.351
"""
coord = namedtuple("weather_location", "latitude, longitude")
coastdat_polygons = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
c = coastdat_polygons.loc[int(coastdat_id)].geometry.centroid
return coord(latitude=c.y, longitude=c.x)
def fetch_coastdat_weather(year, coastdat_id):
"""
Fetch weather one coastdat weather data set.
Parameters
----------
year : int
Year of the weather data set
coastdat_id : numeric
ID of the coastdat data set.
Returns
-------
pd.DataFrame : Weather data set.
Examples
--------
>>> coastdat_id=fetch_id_by_coordinates(53.655119, 11.181475)
>>> fetch_coastdat_weather(2014, coastdat_id)['v_wind'].mean().round(2)
4.39
"""
weather_file_name = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file_name):
download_coastdat_data(filename=weather_file_name, year=year)
key = "/A{0}".format(int(coastdat_id))
return pd.DataFrame(pd.read_hdf(weather_file_name, key))
def adapt_coastdat_weather_to_pvlib(weather, loc):
"""
Adapt the coastdat weather data sets to the needs of the pvlib.
Parameters
----------
weather : pandas.DataFrame
Coastdat2 weather data set.
loc : pvlib.location.Location
The coordinates of the weather data point.
Returns
-------
pandas.DataFrame : Adapted weather data set.
Examples
--------
>>> cd_id=1132101
>>> cd_weather=fetch_coastdat_weather(2014, cd_id)
>>> c=fetch_data_coordinates_by_id(cd_id)
>>> location=pvlib.location.Location(**getattr(c, '_asdict')())
>>> pv_weather=adapt_coastdat_weather_to_pvlib(cd_weather, location)
>>> 'ghi' in cd_weather.columns
False
>>> 'ghi' in pv_weather.columns
True
"""
w = pd.DataFrame(weather.copy())
w["temp_air"] = w.temp_air - 273.15
w["ghi"] = w.dirhi + w.dhi
clearskydni = loc.get_clearsky(w.index).dni
w["dni"] = pvlib.irradiance.dni(
w["ghi"],
w["dhi"],
pvlib.solarposition.get_solarposition(
w.index, loc.latitude, loc.longitude
).zenith,
clearsky_dni=clearskydni,
)
return w
def adapt_coastdat_weather_to_windpowerlib(weather, data_height):
"""
    Adapt the coastdat weather data sets to the needs of the windpowerlib.
Parameters
----------
weather : pandas.DataFrame
Coastdat2 weather data set.
data_height : dict
The data height for each weather data column.
Returns
-------
pandas.DataFrame : Adapted weather data set.
Examples
--------
>>> cd_id=1132101
>>> cd_weather=fetch_coastdat_weather(2014, cd_id)
>>> data_height=cfg.get_dict('coastdat_data_height')
>>> wind_weather=adapt_coastdat_weather_to_windpowerlib(
... cd_weather, data_height)
>>> cd_weather.columns.nlevels
1
>>> wind_weather.columns.nlevels
2
"""
weather = pd.DataFrame(weather.copy())
cols = {
"v_wind": "wind_speed",
"z0": "roughness_length",
"temp_air": "temperature",
}
weather.rename(columns=cols, inplace=True)
dh = [(key, data_height[key]) for key in weather.columns]
weather.columns = pd.MultiIndex.from_tuples(dh)
return weather
def normalised_feedin_for_each_data_set(
year, wind=True, solar=True, overwrite=False
):
"""
Loop over all weather data sets (regions) and calculate a normalised time
series for each data set with the given parameters of the power plants.
This file could be more elegant and shorter but it will be rewritten soon
with the new feedinlib features.
year : int
The year of the weather data set to use.
wind : boolean
Set to True if you want to create wind feed-in time series.
solar : boolean
Set to True if you want to create solar feed-in time series.
Returns
-------
"""
# Get coordinates of the coastdat data points.
data_points = pd.read_csv(
os.path.join(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_centroid"),
),
index_col="gid",
)
pv_sets = None
wind_sets = None
# Open coastdat-weather data hdf5 file for the given year or try to
# download it if the file is not found.
weather_file_name = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file_name):
download_coastdat_data(year=year, filename=weather_file_name)
weather = pd.HDFStore(weather_file_name, mode="r")
# Fetch coastdat data heights from ini file.
data_height = cfg.get_dict("coastdat_data_height")
# Create basic file and path pattern for the resulting files
coastdat_path = os.path.join(cfg.get("paths_pattern", "coastdat"))
feedin_file = os.path.join(
coastdat_path, cfg.get("feedin", "file_pattern")
)
# Fetch coastdat region-keys from weather file.
key_file_path = coastdat_path.format(year="", type="")[:-2]
key_file = os.path.join(key_file_path, "coastdat_keys.csv")
if not os.path.isfile(key_file):
coastdat_keys = weather.keys()
if not os.path.isdir(key_file_path):
os.makedirs(key_file_path)
pd.Series(coastdat_keys).to_csv(key_file)
else:
coastdat_keys = pd.read_csv(
key_file, index_col=[0], squeeze=True, header=None
)
txt_create = "Creating normalised {0} feedin time series for {1}."
hdf = {"wind": {}, "solar": {}}
if solar:
logging.info(txt_create.format("solar", year))
# Add directory if not present
os.makedirs(
coastdat_path.format(year=year, type="solar"), exist_ok=True
)
# Create the pv-sets defined in the solar.ini
pv_sets = feedin.create_pvlib_sets()
# Open a file for each main set (subsets are stored in columns)
for pv_key, pv_set in pv_sets.items():
filename = feedin_file.format(
type="solar", year=year, set_name=pv_key
)
if not os.path.isfile(filename) or overwrite:
hdf["solar"][pv_key] = pd.HDFStore(filename, mode="w")
if wind:
logging.info(txt_create.format("wind", year))
# Add directory if not present
os.makedirs(
coastdat_path.format(year=year, type="wind"), exist_ok=True
)
# Create the pv-sets defined in the wind.ini
wind_sets = feedin.create_windpowerlib_sets()
# Open a file for each main set (subsets are stored in columns)
for wind_key, wind_set in wind_sets.items():
for subset_key, subset in wind_set.items():
wind_sets[wind_key][subset_key] = WindTurbine(**subset)
filename = feedin_file.format(
type="wind", year=year, set_name=wind_key
)
if not os.path.isfile(filename) or overwrite:
hdf["wind"][wind_key] = pd.HDFStore(filename, mode="w")
# Define basic variables for time logging
remain = len(coastdat_keys)
done = 0
start = datetime.datetime.now()
# Loop over all regions
for coastdat_key in coastdat_keys:
# Get weather data set for one location
local_weather = weather[coastdat_key]
# Adapt the coastdat weather format to the needs of pvlib.
# The expression "len(list(hdf['solar'].keys()))" returns the number
# of open hdf5 files. If no file is open, there is nothing to do.
if solar and len(list(hdf["solar"].keys())) > 0:
# Get coordinates for the weather location
local_point = data_points.loc[int(coastdat_key[2:])]
# Create a pvlib Location object
location = pvlib.location.Location(
latitude=local_point["lat"], longitude=local_point["lon"]
)
# Adapt weather data to the needs of the pvlib
local_weather_pv = adapt_coastdat_weather_to_pvlib(
local_weather, location
)
# Create one DataFrame for each pv-set and store into the file
for pv_key, pv_set in pv_sets.items():
if pv_key in hdf["solar"]:
hdf["solar"][pv_key][coastdat_key] = feedin.feedin_pv_sets(
local_weather_pv, location, pv_set
)
# Create one DataFrame for each wind-set and store into the file
if wind and len(list(hdf["wind"].keys())) > 0:
local_weather_wind = adapt_coastdat_weather_to_windpowerlib(
local_weather, data_height
)
for wind_key, wind_set in wind_sets.items():
if wind_key in hdf["wind"]:
hdf["wind"][wind_key][
coastdat_key
] = feedin.feedin_wind_sets(local_weather_wind, wind_set)
# Start- time logging *******
remain -= 1
done += 1
if divmod(remain, 10)[1] == 0:
elapsed_time = (datetime.datetime.now() - start).seconds
remain_time = elapsed_time / done * remain
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=remain_time
)
msg = "Actual time: {:%H:%M}, estimated end time: {:%H:%M}, "
msg += "done: {0}, remain: {1}".format(done, remain)
logging.info(msg.format(datetime.datetime.now(), end_time))
# End - time logging ********
for k1 in hdf.keys():
for k2 in hdf[k1].keys():
hdf[k1][k2].close()
weather.close()
logging.info(
"All feedin time series for {0} are stored in {1}".format(
year, coastdat_path.format(year=year, type="")
)
)
def store_average_weather(
data_type,
weather_path=None,
years=None,
keys=None,
out_file_pattern="average_data_{data_type}.csv",
):
"""
    Get the average value of a weather parameter over all years for each
    weather region. For wind speed, this can be used to select the appropriate
    wind turbine for each region (strong/low wind turbines).
Parameters
----------
data_type : str
The data_type of the coastdat weather data: 'dhi', 'dirhi', 'pressure',
'temp_air', 'v_wind', 'z0'.
keys : list or None
List of coastdat keys. If None all available keys will be used.
years : list or None
List of one or more years to calculate the average data from. You
have to make sure that the weather data files for the given years
exist in the weather path.
weather_path : str
Path to folder that contains all needed files. If None the default
path defined in the config file will be used.
out_file_pattern : str or None
Name of the results file with a placeholder for the data type e.g.
``average_data_{data_type}.csv``). If None no file will be written.
Examples
--------
>>> store_average_weather('temp_air', years=[2014, 2013]) # doctest: +SKIP
>>> v=store_average_weather('v_wind', years=[2014],
... out_file_pattern=None, keys=[1132101])
>>> float(v.loc[1132101].round(2))
4.39
"""
logging.info("Calculating the average wind speed...")
weather_pattern = cfg.get("coastdat", "file_pattern")
if weather_path is None:
weather_path = cfg.get("paths", "coastdat")
# Finding existing weather files.
data_files = os.listdir(weather_path)
# Possible time range for coastdat data set (reegis: 1998-2014).
check = True
if years is None:
years = range(1948, 2017)
check = False
used_years = []
for year in years:
if weather_pattern.format(year=year) in data_files:
used_years.append(year)
elif check is True:
msg = "File not found".format(weather_pattern.format(year=year))
raise FileNotFoundError(msg)
# Loading coastdat-grid as shapely geometries.
coastdat_polygons = pd.DataFrame(
geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
)
coastdat_polygons.drop("geometry", axis=1, inplace=True)
# Opening all weather files
weather = dict()
# open hdf files
for year in used_years:
weather[year] = pd.HDFStore(
os.path.join(weather_path, weather_pattern.format(year=year)),
mode="r",
)
if keys is None:
keys = coastdat_polygons.index
n = len(list(keys))
logging.info("Remaining: {0}".format(n))
for key in keys:
data_type_avg = pd.Series()
n -= 1
if n % 100 == 0:
logging.info("Remaining: {0}".format(n))
hdf_id = "/A{0}".format(key)
for year in used_years:
ws = weather[year][hdf_id][data_type]
data_type_avg = data_type_avg.append(ws, verify_integrity=True)
# calculate the average wind speed for one grid item
coastdat_polygons.loc[
key, "{0}_avg".format(data_type)
] = data_type_avg.mean()
# Close hdf files
for year in used_years:
weather[year].close()
if keys is not None:
coastdat_polygons.dropna(inplace=True)
# write results to csv file
if out_file_pattern is not None:
filename = out_file_pattern.format(data_type=data_type)
fn = os.path.join(weather_path, filename)
logging.info("Average temperature saved to {0}".format(fn))
coastdat_polygons.to_csv(fn)
return coastdat_polygons
def spatial_average_weather(
year, geo, parameter, name, outpath=None, outfile=None
):
"""
Calculate the mean value of a parameter over all data sets within each
region for one year.
Parameters
----------
year : int
Select the year you want to calculate the average temperature for.
geo : geometries.Geometry object
Polygons to calculate the average parameter for.
outpath : str
Place to store the outputfile.
outfile : str
Set your own name for the outputfile.
parameter : str
Name of the item (temperature, wind speed,... of the weather data set.
name : str
Name of the regions table to be used as a column name.
Returns
-------
str : Full file name of the created file.
Example
-------
>>> germany_geo=geometries.load(
... cfg.get('paths', 'geometry'),
... cfg.get('geometry', 'germany_polygon'))
>>> fn=spatial_average_weather(2012, germany_geo, 'temp_air', 'deTemp',
... outpath=os.path.expanduser('~')
... )# doctest: +SKIP
>>> temp=pd.read_csv(fn, index_col=[0], parse_dates=True, squeeze=True
... )# doctest: +SKIP
>>> round(temp.mean() - 273.15, 2)# doctest: +SKIP
8.28
>>> os.remove(fn)# doctest: +SKIP
"""
logging.info(
"Getting average {0} for {1} in {2} from coastdat2.".format(
parameter, name, year
)
)
name = name.replace(" ", "_")
# Create a Geometry object for the coastdat centroids.
coastdat_geo = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
coastdat_geo["geometry"] = coastdat_geo.centroid
# Join the tables to create a list of coastdat id's for each region.
coastdat_geo = geometries.spatial_join_with_buffer(
coastdat_geo, geo, name=name, limit=0
)
    # Fix regions with no matches (no matches if a region is too small).
fix = {}
for reg in set(geo.index) - set(coastdat_geo[name].unique()):
reg_point = geo.representative_point().loc[reg]
coastdat_poly = geometries.load(
cfg.get("paths", "geometry"),
cfg.get("coastdat", "coastdatgrid_polygon"),
)
fix[reg] = coastdat_poly.loc[
coastdat_poly.intersects(reg_point)
].index[0]
# Open the weather file
weather_file = os.path.join(
cfg.get("paths", "coastdat"),
cfg.get("coastdat", "file_pattern").format(year=year),
)
if not os.path.isfile(weather_file):
download_coastdat_data(year=year, filename=weather_file)
weather = pd.HDFStore(weather_file, mode="r")
# Calculate the average temperature for each region with more than one id.
avg_value = pd.DataFrame()
for region in geo.index:
cd_ids = coastdat_geo[coastdat_geo[name] == region].index
number_of_sets = len(cd_ids)
tmp = pd.DataFrame()
logging.debug((region, len(cd_ids)))
for cid in cd_ids:
try:
cid = int(cid)
except ValueError:
pass
if isinstance(cid, int):
key = "A" + str(cid)
else:
key = cid
tmp[cid] = weather[key][parameter]
if len(cd_ids) < 1:
key = "A" + str(fix[region])
avg_value[region] = weather[key][parameter]
else:
avg_value[region] = tmp.sum(1).div(number_of_sets)
weather.close()
# Create the name an write to file
regions = sorted(geo.index)
if outfile is None:
out_name = "{0}_{1}".format(regions[0], regions[-1])
outfile = os.path.join(
outpath,
"average_{parameter}_{type}_{year}.csv".format(
year=year, type=out_name, parameter=parameter
),
)
avg_value.to_csv(outfile)
logging.info("Average temperature saved to {0}".format(outfile))
return outfile
def federal_state_average_weather(year, parameter):
"""
Example for spatial_average_weather() with federal states polygons.
Parameters
----------
    year : int
        Year of the weather data set.
    parameter : str
        Name of the weather parameter, e.g. 'temp_air' or 'v_wind'.
    Returns
    -------
    pandas.DataFrame
    """
federal_states = geometries.get_federal_states_polygon()
filename = os.path.join(
cfg.get("paths", "coastdat"),
"average_{0}_BB_TH_{1}.csv".format(parameter, year),
)
if not os.path.isfile(filename):
spatial_average_weather(
year, federal_states, parameter, "federal_states", outfile=filename
)
return pd.read_csv(
filename,
index_col=[0],
parse_dates=True,
date_parser=lambda col: pd.to_datetime(col, utc=True),
)
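# Illustrative call: hourly average air temperature per federal state for 2014:
#   temp_by_state = federal_state_average_weather(2014, 'temp_air')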
def aggregate_by_region_coastdat_feedin(
pp, regions, year, category, outfile, weather_year=None
):
"""
Aggregate wind and pv feedin time series for each region defined by
a geoDataFrame with region polygons.
Parameters
----------
pp : pd.DataFrame
Power plant table.
regions : geopandas.geoDataFrame
Table with the polygons.
year : int
Year for the power plants and for the weather data if weather_year is
None.
category : str
Feed-in category: 'wind' or 'solar'
outfile : str
Name of the output file.
weather_year : int or None
If None the year parameter will be used for the weather year.
"""
cat = category.lower()
logging.info("Aggregating {0} feed-in for {1}...".format(cat, year))
if weather_year is None:
weather_year = year
weather_year_str = ""
else:
logging.info("Weather data taken from {0}.".format(weather_year))
weather_year_str = " (weather: {0})".format(weather_year)
# Define the path for the input files.
coastdat_path = os.path.join(cfg.get("paths_pattern", "coastdat")).format(
year=weather_year, type=cat
)
# Do normalized timeseries exist? If not, create
if os.path.isdir(coastdat_path):
if len(os.listdir(coastdat_path)) == 0:
normalised_feedin_for_each_data_set(weather_year)
else:
normalised_feedin_for_each_data_set(weather_year)
# Prepare the lists for the loops
set_names = []
set_name = None
pwr = dict()
columns = dict()
replace_str = "coastdat_{0}_{1}_".format(weather_year, category)
for file in os.listdir(coastdat_path):
if file[-2:] == "h5":
set_name = file[:-3].replace(replace_str, "")
set_names.append(set_name)
pwr[set_name] = pd.HDFStore(os.path.join(coastdat_path, file))
columns[set_name] = pwr[set_name]["/A1129087"].columns
# Create DataFrame with MultiColumns to take the results
my_index = pwr[set_name]["/A1129087"].index
my_cols = pd.MultiIndex(
levels=[[], [], []],
codes=[[], [], []],
names=["region", "set", "subset"],
)
feed_in = pd.DataFrame(index=my_index, columns=my_cols)
# Loop over all aggregation regions
# Sum up time series for one region and divide it by the
# capacity of the region to get a normalised time series.
for region in regions:
try:
coastdat_ids = pp.loc[(category, region)].index
except KeyError:
coastdat_ids = []
number_of_coastdat_ids = len(coastdat_ids)
logging.info(
"{0}{3} - {1} ({2})".format(
year, region, number_of_coastdat_ids, weather_year_str
)
)
logging.debug("{0}".format(coastdat_ids))
# Loop over all sets that have been found in the coastdat path
if number_of_coastdat_ids > 0:
for name in set_names:
# Loop over all sub-sets that have been found within each file.
for col in columns[name]:
temp = pd.DataFrame(index=my_index)
# Loop over all coastdat ids, that intersect with the
# actual region.
for coastdat_id in coastdat_ids:
# Create a tmp table for each coastdat id.
coastdat_key = "/A{0}".format(int(coastdat_id))
pp_inst = float(
pp.loc[
(category, region, coastdat_id),
"capacity_{0}".format(year),
]
)
temp[coastdat_key] = pwr[name][coastdat_key][col][
:8760
].multiply(pp_inst)
# Sum up all coastdat columns to one region column
colname = "_".join(col.split("_")[-3:])
feed_in[region, name, colname] = temp.sum(axis=1).divide(
float(
pp.loc[
(category, region), "capacity_{0}".format(year)
].sum()
)
)
feed_in.to_csv(outfile)
for name_of_set in set_names:
pwr[name_of_set].close()
def aggregate_by_region_hydro(pp, regions, year, outfile_name):
"""Aggregate hydro power plants by region."""
hydro = bmwi.bmwi_re_energy_capacity()["water"]
hydro_capacity = pp.loc["Hydro", "capacity_{0}".format(year)].sum()
full_load_hours = hydro.loc[year, "energy"] / hydro_capacity * 1000
hydro_path = os.path.abspath(os.path.join(*outfile_name.split("/")[:-1]))
if not os.path.isdir(hydro_path):
os.makedirs(hydro_path)
idx = pd.date_range(
start="{0}-01-01 00:00".format(year),
end="{0}-12-31 23:00".format(year),
freq="H",
tz="Europe/Berlin",
)
feed_in = pd.DataFrame(columns=regions, index=idx)
feed_in[feed_in.columns] = full_load_hours / len(feed_in)
feed_in.to_csv(outfile_name)
# https://shop.dena.de/fileadmin/denashop/media/Downloads_Dateien/esd/
# 9112_Pumpspeicherstudie.pdf
# S. 110ff
def aggregate_by_region_geothermal(regions, year, outfile_name):
"""Aggregate hydro power plants by region."""
full_load_hours = cfg.get("feedin", "geothermal_full_load_hours")
hydro_path = os.path.abspath(os.path.join(*outfile_name.split("/")[:-1]))
if not os.path.isdir(hydro_path):
os.makedirs(hydro_path)
idx = pd.date_range(
start="{0}-01-01 00:00".format(year),
end="{0}-12-31 23:00".format(year),
freq="H",
tz="Europe/Berlin",
)
    feed_in = pd.DataFrame(columns=regions, index=idx)
from flask import Response, url_for, current_app, request
from flask_restful import Resource, reqparse
import pandas as pd
import os
from pathlib import Path
from flask_mysqldb import MySQL
from datetime import datetime
import random
import string
from flask_mail import Mail, Message
db = MySQL()
parser = reqparse.RequestParser()
class ApproveCovid(Resource):
def approve(self, update_id, aid):
cur = db.connection.cursor()
cur.execute(
"""
INSERT INTO Administrator_approved_covid_update (Update_id, AID)
VALUES (%s, %s);
""", (update_id, aid)
)
db.connection.commit()
def getUpdate(self, update_id):
cur = db.connection.cursor()
cur.execute(
"""
Select * from Covid_update where Update_id = %s
""", (update_id,)
)
data = cur.fetchall()
return data[0]
def post(self):
params = ['update_id', 'aid', 'approve']
for elem in params:
parser.add_argument(elem)
args = parser.parse_args()
print(args['update_id'], args['aid'])
_, recovered, death, confirmed, _, countie = self.getUpdate(args['update_id'])
self.approve(args['update_id'], args['aid'])
print(confirmed, death, recovered, countie)
if (args['approve']):
cur = db.connection.cursor()
cur.execute(
"""
UPDATE covid_data
SET Confirmed = %s, Deaths = %s, Recovered = %s
where Admin2 = %s;
""", (confirmed, death, recovered, countie)
)
db.connection.commit()
print(cur.rowcount)
if cur.rowcount > 0:
return Response("Record Sucessfully updated", status=200)
else:
return Response("Update failed", status=500)
else:
return Response("Update Sucessfully Denied", status=500)
class CovidUpdates(Resource):
def administratorExist(self, aid):
cur = db.connection.cursor()
cur.execute(
"""
Select * from Administrators where AID = %s
""", (aid)
)
return True if cur.rowcount > 0 else False
def get(self):
parser.add_argument('aid')
args = parser.parse_args()
if self.administratorExist(args['aid']) == False:
return Response("Administrators does not exist", 400)
cur = db.connection.cursor()
cur.execute(
"""
Select * from Covid_update where Update_id not in (
Select Update_id from Administrator_approved_covid_update
);
"""
)
data = cur.fetchall()
row_headers=[x[0] for x in cur.description]
json_data=[]
for result in data:
json_data.append(dict(zip(row_headers,result)))
cur.close()
        df = pd.DataFrame(json_data)
"""
Copyright (c) 2018 The Regents of the University of Michigan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Train a model (LSTM, RNN, HMM, SVM, Logistic Regression) to predict student's dropout behavior in MOOCs.
Or test a trained_model by providing predict labels and predict probabilities for given test data.
Take csv files with students' weekly features as input and return a trained LSTM model (Keras).
Each weekly csv is a list of users (rows), with columns corresponding to the features for that week.
Features come from table 1, pg 123, of the paper ("Temporal predication of dropouts in MOOCs:
Reaching the low hanging fruit through stacking generalization", Xing et al. 2016)
These models are generally a replication of the models in the paper ("Temporal
Models for Predicting Student Dropout in Massive Open Online Courses", Fei and Yeung 2015)
The output model is saved in HDF5 format and will contain the architecture, weights, training figuration
and states of the optimizer (allowing to resume training exactly where you left off), you can load it by:
from keras.models import load_model
model = load_model('my_model.h5')
Usage: python3 train_lstm.py \
-i /raw_data/path/to/feature_data.csv \
-l /raw_data/path/to/label_data.csv \
-f number of features per week \
-s hidden layer size \
-d definition of droupout, take in {1,2,3} \
-k an integer for number of folds in cross validation \
-o /output_file/path/to/my_output \
-m train, validation or test \
-t the name of the method you want to use (LSTM, RNN, SVM_RBF, SVM_LINEAR, LR) \
-a /trained_model/path/to/method.h5 (required only in test mode)
On Yuming's local environment
Train mode:
python train_lstm.py \
-i C:\\Users\\Umean\\Desktop\\MOOC\\vaccines_002_features.csv \
-l C:\\Users\\Umean\\Desktop\\MOOC\\vaccines_002_labels.csv \
-f 7 \
-o lstm.h5 \
-m train \
-t lstm
Test(prediction) mode:
python train_lstm.py \
-i C:\\Users\\Umean\\Desktop\\MOOC\\vaccines_002_features.csv \
-f 7 \
-a lstm.h5 \
-o lstm_predictions \
-m test \
-t lstm
Validation(CV) mode:
python train_lstm.py \
-i C:\\Users\\Umean\\Desktop\\MOOC\\vaccines_002_features.csv \
-l C:\\Users\\Umean\\Desktop\\MOOC\\vaccines_002_labels.csv \
-f 7 \
-k 5 \
-o auc_result \
-m validation \
-t lstm \
"""
import argparse
from keras import regularizers
from keras.models import Sequential
from keras.models import clone_model
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import SimpleRNN
from keras.layers import LSTM
from keras.models import load_model
import random
from sklearn.dummy import DummyClassifier
from sklearn.externals import joblib
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import scale
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import check_X_y
import pandas as pd
import numpy as np
EPOCHS = 100
BATCH_SIZE = 128
HIDDEN_SIZE = 20
NUM_FOLD = 5
RBF_SVM_MAX_N = 30000
LINEAR_SVM_MAX_N = 30000
CV_SUBSAMPLING_TYPE = 1
PARAMETER_SET_SIZE_THRESHOLD = 1500
KERAS_MODELS = ("LSTM", "LSTM_DROPOUT", "LSTM_L1REG", "RNN")
NON_KERAS_MODELS = ("LR", "SVM_RBF", "SVM_LINEAR")
def extract_XY(feature_data, label_data, n_feature, mode):
"""
Extract well-shaped scaled feature and label tensors (X and y) from raw data in train and validation mode.
Extract well-shaped scaled feature tensors and user_id arrays from raw data in test mode.
Seperate features by weeks in raw data.
:param feature_data: a pandas.Dataframe with columns for userID, features in different weeks.
:param label_data: a pandas.Dataframe with columns for userID and label. None if in test mode.
:param n_feature: an integer of the number of features per week.
:param mode: a string indicates the purpose (train or test)
:return: tuples of array (X, y), where X has shape ( , n_week, n_feature),
y has shape ( ,1) and y are ids in test mode
"""
N = feature_data.shape[0]
if mode == "train" or mode == "validation":
try:
            assert 'label_value' not in feature_data.columns, "Feature data shouldn't include a label_value column"
        except AssertionError:
            print("[WARNING] Feature data includes a label_value column; dropping it")
feature_data = feature_data.drop('label_value', 1)
assert (feature_data.shape[1] - 1) % n_feature == 0, "Wrong input data shape"
# check input data shape; if dimensions don't match, try filtering the feature data
# (this exception occurs when the feature extraction pulls from non-clickstream data sources;
# users without any clickstream entries are not counted in dropout labels)
try:
assert feature_data.shape[0] == label_data.shape[0], "userID doesn't match"
except AssertionError:
# filter feature data to only include users in label_data
print("[WARNING] userID columns in feature and label data do not match; attempting to filter feature data")
feature_data = feature_data[feature_data.userID.isin(label_data.userID)]
N = feature_data.shape[0]
n_week = int((feature_data.shape[1] - 1) / n_feature)
X = np.array(feature_data.drop('userID', 1))
X = scale(X)
merged_data = pd.merge(feature_data, label_data, on='userID')
y = np.array(merged_data["label_value"], dtype="int").reshape(N, 1)
if mode == "test":
try:
assert 'label_value' not in feature_data.columns, "Feature data shouldn't include labels in test mode"
except AssertionError:
print("[WARNING] Feature data shouldn't include labels in test mode")
feature_data = feature_data.drop('label_value', 1)
assert (feature_data.shape[1] - 1) % n_feature == 0, "Wrong input data shape"
n_week = int((feature_data.shape[1] - 1) / n_feature)
X = np.array(feature_data.iloc[:, 1:])
X = scale(X)
y = feature_data.iloc[:, 0]
X = X.reshape(N, n_week, n_feature)
return X, y
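# Illustrative call (file names are placeholders):
#   features = pd.read_csv('course_features.csv')
#   labels = pd.read_csv('course_labels.csv')
#   X, y = extract_XY(features, labels, n_feature=7, mode='train')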
def downsample_xy_to_n(X, y, n = 30000):
"""
If X, y contain more than n samples, return a random subsample of size n.
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N,1)
:param n: integer.
:return: tuples of array (X, y), where X has shape ( , n_week, n_feature), y has shape ( ,1) and y are ids in test mode
"""
assert X.shape[0] == y.shape[0], "feature and label vectors must be of same length"
num_obs = X.shape[0]
if num_obs > n:
print("[INFO] downsampling data from size {} to size {}".format(num_obs, n))
subsample_ix = random.sample(range(0, num_obs), n)
X = X[subsample_ix,]
y = y[subsample_ix,]
return X, y
def droprate_lstm_train(X, y, hidden_size=HIDDEN_SIZE):
"""
Construct a LSTM model to predict the type I dropout rate (See paper) from features in every week.
Fit the model with train data.
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N,1)
:param hidden_size: an integer of hidden layer size.
:return: model: a fitted LSTM model as keras.models.Sequential
"""
model = Sequential()
model.add(LSTM(hidden_size, input_shape=(X.shape[1], X.shape[2])))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
print(model.summary())
model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE)
return model
def droprate_lstm_dropout_train(X, y, hidden_size=HIDDEN_SIZE):
"""
    Construct an LSTM model with a single dropout layer after the input layer to predict the type I dropout rate (See paper) from features in every week.
Fit the model with train data.
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N,1)
:param hidden_size: an integer of hidden layer size.
:return: model: a fitted LSTM model as keras.models.Sequential
"""
model = Sequential()
    model.add(Dropout(0.2, input_shape=(X.shape[1], X.shape[2])))
model.add(LSTM(hidden_size))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
print(model.summary())
model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE)
return model
def droprate_lstm_l1reg_train(X, y, hidden_size=HIDDEN_SIZE, l1_lambda=0.01):
"""
    Construct an LSTM model with L1 regularization on the LSTM layer to predict the type I dropout rate (See paper) from features in every week.
Fit the model with train data.
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N,1)
:param hidden_size: an integer of hidden layer size.
:return: model: a fitted LSTM model as keras.models.Sequential
"""
model = Sequential()
model.add(LSTM(hidden_size, input_shape=(X.shape[1], X.shape[2]),
kernel_regularizer=regularizers.l1(l1_lambda), activity_regularizer=regularizers.l1(l1_lambda)))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
print(model.summary())
model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE)
return model
def droprate_rnn_train(X, y, hidden_size=HIDDEN_SIZE):
"""
Construct a RNN model to predict the type I dropout rate (See paper) from features in every week.
Fit the model with train data.
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N,1)
:param hidden_size: an integer of hidden layer size.
:return: model: a fitted RNN model as keras.models.Sequential
"""
model = Sequential()
model.add(SimpleRNN(hidden_size, input_shape=(X.shape[1], X.shape[2],)))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
print(model.summary())
model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE)
return model
def droprate_lr_train(X, y, k=NUM_FOLD):
"""
Construct a Logistic Regression model to predict the type I dropout rate (See paper) from features in every week.
Fit the model with train data and use Cross Validation to choose best C.
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N,1)
:param k: an interger of number of folds in cross validation for choosing C
:return: logistic_classifier: a fitted Logistic Regression model as sklearn.LogisticRegressionCV
"""
Cs = np.logspace(-1, 6, 8)
X = X.reshape(X.shape[0], X.shape[1]*X.shape[2])
y = np.ravel(y)
print("[INFO] Select tuning parameters for LR")
logistic_classifier = LogisticRegressionCV(Cs=Cs, cv=k, scoring="roc_auc")
print("[INFO] Training logistic regression model")
logistic_classifier.fit(X, y)
print("[INFO] Best parameter: ", logistic_classifier.C_, " out of ", logistic_classifier.Cs_)
print("[INFO] Accuracy:", logistic_classifier.score(X, y))
return logistic_classifier
def droprate_svm_rbf_train(X, y, k=NUM_FOLD):
"""
Construct a RBF kernel SVM classifier to predict the type I dropout rate (See paper) from features in every week.
Fit the model with train data and use Cross Validation to choose best C and gamma.
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N,1)
:param k: an interger of number of folds in cross validation for choosing C
:return: svm_classifier: a fitted RBF kernel SVM classifier as sklearn.GridSearchCV
"""
if X.shape[0] <= PARAMETER_SET_SIZE_THRESHOLD:
C_RANGE = np.logspace(-1, 2, 4)
GAMMA_RANGE = np.logspace(-1, 1, 3)
print("[INFO] Large Parameters Set")
else:
C_RANGE = np.logspace(-1, 0, 2)
GAMMA_RANGE = np.logspace(-2, -1, 2)
print("[INFO] Small Parameters Set")
X = X.reshape(X.shape[0], X.shape[1] * X.shape[2])
y = np.ravel(y)
param_grid = dict(gamma=GAMMA_RANGE, C=C_RANGE)
cv = StratifiedKFold(n_splits=k)
print("[INFO] Select tuning parameters for RBF SVM")
svm_classifier = GridSearchCV(SVC(probability=True), param_grid=param_grid, cv=cv, scoring="roc_auc")
print("[INFO] Training RBF SVM model")
svm_classifier.fit(X, y)
print("[INFO] Best parameter: ", svm_classifier.best_params_)
#print("Accuracy:", svm_classifier.score(X, y))
return svm_classifier
def droprate_svm_linear_train(X, y, k=NUM_FOLD):
"""
Construct a linear kernel SVM classifier to predict the type I dropout rate (See paper) from features in every week.
Fit the model with train data and use Cross Validation to choose best C and gamma.
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N,1)
:param k: an interger of number of folds in cross validation for choosing C
:return: svm_classifier: a fitted linear kernel SVM classifier as sklearn.GridSearchCV
"""
if X.shape[0] <= PARAMETER_SET_SIZE_THRESHOLD:
C_RANGE = np.logspace(-1, 2, 4)
print("[INFO] Large Parameters Set")
else:
C_RANGE = np.logspace(-1, 0, 2)
print("[INFO] Small Parameters Set")
X = X.reshape(X.shape[0], X.shape[1] * X.shape[2])
y = np.ravel(y)
param_grid = dict(C=C_RANGE)
cv = StratifiedKFold(n_splits=k)
print("[INFO] Select tuning parameters for linear SVM")
linear_svm_classifier = GridSearchCV(SVC(kernel='linear', probability=True),
param_grid=param_grid, cv=cv, scoring="roc_auc")
print("[INFO] Training linear SVM model")
linear_svm_classifier.fit(X, y)
print("[INFO] Best parameter: ", linear_svm_classifier.best_params_)
# print("Accuracy:", svm_classifier.score(X, y))
return linear_svm_classifier
def split_indices(y, num_fold):
"""
Provide sets of train-test indices to split the raw data into several stratified folds
:param y: labels of the raw_data, shape (N, 1)
:param num_fold: an interger of the number of folds in Cross Validation.
:return: a list of tuples (train_index, test_index) of length num_fold
train_index: index of train data in each train-test set
test_index: index of test data in each train-test set
"""
skf = StratifiedKFold(n_splits=num_fold)
N = y.shape[0]
# np.set_printoptions(threshold=np.nan)
indices = skf.split(np.zeros(N), y.flatten())
return indices
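# Illustrative use of the generated folds (the model choice here is arbitrary):
#   for train_index, test_index in split_indices(y, num_fold=NUM_FOLD):
#       model = droprate_lr_train(X[train_index], y[train_index])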
def model_validation(train_func, model_type, X, y, num_fold):
"""
Calculate the model's ROC_AUC score by Stratified K-Folds Cross Validation on whole input data.
:param train_func: train function of certain type of model
:param model_type: model type
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param y: a numpy array of labels, has shape (N, 1)
:param num_fold: an interger of the number of folds in Cross Validation
:return: a real value represents the AUC score
"""
num_obs = X.shape[0]
indices = split_indices(y, num_fold)
cv_AUC = []
cv_id = 1
print("[INFO] Begin CV")
for train_index, test_index in indices:
print("[INFO] Fold %d" % cv_id)
num_train_obs = len(train_index)
        if CV_SUBSAMPLING_TYPE == 2 and model_type in ('SVM_RBF', 'SVM_LINEAR') and num_train_obs > RBF_SVM_MAX_N:
# Second type of downsampling in CV: (when num_train_obs > RBF_SVM_MAX_N)
# In every fold process, randomly choose RBF_SVM_MAX_N samples to train SVM and predict on all the rest
# Then repeat this process num_fold times and average the AUC
train_index = random.sample(set(train_index), RBF_SVM_MAX_N)
print("[INFO] downsampling data from size {} to size {}".format(num_train_obs, len(train_index)))
test_index = list(set(range(0, num_obs)) - set(train_index))
model = train_func(X[train_index], y[train_index])
if model_type in ["LR", "SVM_RBF", 'SVM_LINEAR']:
X_test = X[test_index]
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1] * X_test.shape[2])
y_pred = model.predict_proba(X_test)[:, 1]
if model_type in ["LSTM", "RNN"]:
y_pred = model.predict_proba(X[test_index])
cv_AUC.append(roc_auc_score(y[test_index], y_pred))
cv_id += 1
scores = np.mean(np.array(cv_AUC))
print("[INFO] AUC: %.2f%%" % (scores * 100))
return scores
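# Illustrative call: k-fold cross-validated AUC for the plain LSTM model:
#   auc = model_validation(droprate_lstm_train, 'LSTM', X, y, num_fold=NUM_FOLD)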
def model_evaluate(model_type, raw_data, n_feature, num_fold, hidden_size=20):
"""
Evaluate the LSTM or rnn model by Stratified K-Folds Cross Validation.
:param model_type: model type
:param raw_data: a pandas.Dataframe with columns for userID, features in different weeks and label
:param n_feature: an integer of the number of features per week
:param num_fold: an interger of the number of folds in Cross Validation
:param hidden_size: an integer of hidden layer size.
:return: a real value represents the accuracy.
"""
    X, y = extract_XY(raw_data, raw_data[['userID', 'label_value']], n_feature, "train")
model_unfitted = Sequential()
if model_type == "RNN":
model_unfitted.add(SimpleRNN(hidden_size, input_shape=(X.shape[1], X.shape[2],)))
if model_type == "LSTM":
model_unfitted.add(LSTM(hidden_size, input_shape=(X.shape[1], X.shape[2],)))
model_unfitted.add(Dense(1, activation='sigmoid'))
model_unfitted.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
indices = split_indices(y, num_fold)
cv_AUC = []
for train_index, test_index in indices:
model = clone_model(model_unfitted)
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(X[train_index], y[train_index], epochs=EPOCHS, batch_size=BATCH_SIZE)
y_pred = model.predict_proba(X[test_index])
cv_AUC.append(roc_auc_score(y[test_index], y_pred))
scores = np.mean(np.array(cv_AUC))
print("AUC: %.2f%%" % (scores * 100))
return scores
def lr_svm_predict(model, X, user_id):
"""
Do predictions based on the given trained LR or SVM model and the data features.
:param model: the trained sklearn model
:param X: a numpy array of features, has shape ( , n_week, n_feature)
:param user_id: an array of the user_ids
:return: a pandas DataFrame of user_ids, predict probabilities and predict labels.
"""
N = X.shape[0]
X = X.reshape(X.shape[0], X.shape[1] * X.shape[2])
print("[INFO] Predicting")
y_prob = model.predict_proba(X)[:, 1].reshape(N)
y_pred = model.predict(X).reshape(N)
predictions = pd.DataFrame({'userID': user_id, 'prob': y_prob, 'pred': y_pred})
# -*- coding: utf-8 -*-
"""
checkEuroClassesValid
createEFTInput
getProportions
readFleetProps
specifyBusCoach
specifyEuroProportions
SpecifyWeight
Created on Fri Apr 20 14:51:30 2018
@author: edward.barratt
"""
import re
import inspect
import numpy as np
import pandas as pd
import pywintypes
import xlrd
from EFT_Tools import (availableRoadTypes,
euroClassNameVariations,
euroClassNameVariationsAll,
euroClassNameVariationsIgnore,
euroSearchTerms,
euroTechs,
getLogger,
logprint,
techVehs,
VehSplits,
weightClassNameVariations)
def checkEuroClassesValid(workBook, vehRowStarts, vehRowEnds, EuroClassNameColumns, Type=99, logger=None):
"""
Check that all of the available euro classes are specified.
"""
parentFrame = inspect.currentframe().f_back
(filename, xa, xb, xc, xd) = inspect.getframeinfo(parentFrame)
# Get the logging details.
loggerM = getLogger(logger, 'checkEuroClassesValid')
if Type == 1:
logprint(loggerM, "Checking all motorcycle euro class names are understood.")
elif Type == 2:
logprint(loggerM, "Checking all hybrid bus euro class names are understood.")
elif Type == 0:
logprint(loggerM, "Checking all other euro class names are understood.")
else:
logprint(loggerM, "Checking all euro class names are understood.")
ws_euro = workBook.Worksheets("UserEuro")
for [vi, vehRowStart] in enumerate(vehRowStarts):
vehRowEnd = vehRowEnds[vi]
for [ci, euroNameCol] in enumerate(EuroClassNameColumns):
euroClassRange = "{col}{rstart}:{col}{rend}".format(col=euroNameCol, rstart=vehRowStart, rend=vehRowEnd)
euroClassesAvailable = ws_euro.Range(euroClassRange).Value
for ecn in euroClassesAvailable:
ecn = ecn[0]
if ecn is None:
continue
if ecn not in euroClassNameVariationsAll:
if ecn not in euroClassNameVariationsIgnore:
raise ValueError('Unrecognized Euro Class Name: "{}".'.format(ecn))
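# Illustrative usage sketch (assumes `wb` is a win32com workbook handle for an open
# EFT file; the row and column values below are placeholders that must match the
# "UserEuro" sheet layout of the EFT version in use):
#   checkEuroClassesValid(wb, vehRowStarts=[10], vehRowEnds=[25],
#                         EuroClassNameColumns=['B'], Type=0)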
def createEFTInput(vBreakdown='Detailed Option 2',
speeds=[5,6,7,8,9,10,12,14,16,18,20,25,30,35,40,
45,50,60,70,80,90,100,110,120,130,140],
roadTypes=availableRoadTypes,
vehiclesToSkip=['Taxi (black cab)'],
vehiclesToInclude=None,
tech='All',
logger=None):
"""
vehiclesToInclude trumps (and overwrites) vehiclesToSkip
"""
# Get the logging details.
loggerM = getLogger(logger, 'createEFTInput')
logprint(loggerM, 'Creating EFT input.', level='debug')
logprint(loggerM, 'Initial vehiclesToSkip: {}'.format(', '.join(vehiclesToSkip)), level='debug')
logprint(loggerM, 'Initial vehiclesToInclude: {}'.format('None' if vehiclesToInclude is None else ', '.join(vehiclesToInclude)), level='debug')
VehSplit = VehSplits[vBreakdown]
logprint(loggerM, 'VehSplit: {}'.format(', '.join(VehSplit)), level='debug')
if vehiclesToInclude is not None:
# Populate vehiclesToSkip with those vehicles that are not included.
vehiclesToSkip = []
for veh in VehSplit:
if veh not in vehiclesToInclude:
vehiclesToSkip.append(veh)
logprint(loggerM, 'Intermediate vehiclesToSkip: {}'.format(', '.join(vehiclesToSkip)), level='debug')
#RoadTypes = ['Urban (not London)', 'Rural (not London)', 'Motorway (not London)']
if tech != 'All':
# Add vehicles to vehiclesToSkip that are irrelevant for the chosen technology.
for veh in VehSplit:
if veh not in techVehs[tech]:
vehiclesToSkip.append(veh)
vehiclesToSkip = list(set(vehiclesToSkip))
logprint(loggerM, 'Final vehiclesToSkip: {}'.format(', '.join(vehiclesToSkip)), level='debug')
if type(roadTypes) is str:
if roadTypes in ['all', 'All', 'ALL']:
roadTypes = availableRoadTypes
else:
roadTypes = [roadTypes]
if vBreakdown == 'Basic Split':
numRows = 2*len(roadTypes)*len(speeds)
else:
numRows = len(roadTypes)*len(speeds)*(len(VehSplit)-len(vehiclesToSkip))
numCols = 6 + len(VehSplit)
inputDF = pd.DataFrame(index=range(numRows), columns=range(numCols))
ri = -1
for rT in roadTypes:
logprint(loggerM, 'roadType - {}'.format(rT), level='debug')
for sp in speeds:
logprint(loggerM, ' speed - {}'.format(sp), level='debug')
for veh in VehSplit:
logprint(loggerM, ' vehicle - {}'.format(veh), level='debug')
#print(' veh - {}'.format(veh))
if vBreakdown == 'Basic Split':
ri += 2
#inputDF.set_value(ri-1, 0, 'S{} - LDV - {}'.format(sp, rT))
inputDF.iat[ri-1, 0] = 'S{} - LDV - {}'.format(sp, rT)
inputDF.iat[ri-1, 1] = rT
inputDF.iat[ri-1, 2] = 1
inputDF.iat[ri-1, 3] = 0
inputDF.iat[ri-1, 4] = sp
inputDF.iat[ri-1, 5] = 1
inputDF.iat[ri-1, 6] = 1
inputDF.iat[ri, 0] = 'S{} - HDV - {}'.format(sp, rT)
inputDF.iat[ri, 1] = rT
inputDF.iat[ri, 2] = 1
inputDF.iat[ri, 3] = 100
inputDF.iat[ri, 4] = sp
inputDF.iat[ri, 5] = 1
inputDF.iat[ri, 6] = 1
else:
if veh in vehiclesToSkip:
logprint(loggerM, ' skipped', level='debug')
pass
else:
logprint(loggerM, ' including', level='debug')
ri += 1
inputDF.iat[ri, 0] = 'S{} - {} - {}'.format(sp, veh, rT)
inputDF.iat[ri, 1] = rT
inputDF.iat[ri, 2] = 1
for vehi, vehb in enumerate(VehSplit):
if vehb == veh:
inputDF.iat[ri, 3+vehi] = 100
else:
inputDF.iat[ri, 3+vehi] = 0
inputDF.iat[ri, len(VehSplit)+3] = sp
inputDF.iat[ri, len(VehSplit)+4] = 1 # 1 hour. Not necessary for g/km output.
inputDF.iat[ri, len(VehSplit)+5] = 1 # 1 km. Not necessary either.
logprint(loggerM, ' done', level='debug')
inputData = inputDF.to_numpy()
inputShape = np.shape(inputData)
logprint(loggerM, 'input created with dimensions {} by {}.'.format(inputShape[0], inputShape[1]), level='debug')
return inputData
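# Illustrative usage sketch (vehicle names are placeholders and must match entries of
# VehSplits for the chosen breakdown; one row is produced per road type / speed /
# vehicle combination):
#   eft_rows = createEFTInput(vBreakdown='Detailed Option 2', speeds=[30, 50],
#                             roadTypes='Urban (not London)',
#                             vehiclesToInclude=['Car', 'LGV'])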
def getProportions(ws, ColName, ColProp, ColUser, vehRowStarts,
vehRowEnds, mode='Most Vehicles', logger=None):
# Get the logging details.
loggerM = getLogger(logger, 'getProportions')
# Start a pandas dateframe.
df = pd.DataFrame(columns=['vehicle', 'euroname', 'euroclass', 'technology',
'proportion', 'sourceCell', 'userCell'])
for vehi in range(len(vehRowStarts)):
starow = vehRowStarts[vehi]
endrow = vehRowEnds[vehi]
if mode == 'Most Vehicles':
vehName = ws.Range("{}{}".format(ColName, starow-1)).Value
while vehName is None:
vehName = ws.Range("{}{}".format(ColName, starow)).Value
starow += 1
elif mode == 'Motorcycles':
stroke_ = ws.Range("A{}".format(starow)).Value
weight_ = ws.Range("A{}".format(starow+1)).Value
if stroke_ == '0-50cc':
vehName = 'Motorcycle - 0-50cc'
else:
vehName = 'Motorcycle - {} - {}'.format(stroke_, weight_)
elif mode == 'Hybrid Buses':
decker_ = ws.Range("A{}".format(starow)).Value
vehName = 'Hybrid Buses - {}'.format(decker_)
starow += 1 # Grrrrr. Poor formatting in the EFT
endrow += 1
elif mode == 'Weights':
vehName = ws.Range("{}{}".format(ColName, starow-1)).Value
else:
raise ValueError("mode '{}' is not recognised.".format(mode))
for row in range(starow, endrow+1):
euroName = ws.Range("{}{}".format(ColName, row)).Value
if euroName is not None:
sourceCell = "{}{}".format(ColProp, row)
userCell = "{}{}".format(ColUser, row)
proportion = ws.Range(sourceCell).Value
if not isinstance(proportion, float):
logprint(loggerM, 'Bad proportion value "{}" for veh {}, euro {}.'.format(proportion, vehName, euroName), level='info')
sourceCell = "{}{}".format(ColUser, row)
proportion = ws.Range(sourceCell).Value
if not isinstance(proportion, float):
#print(proportion)
raise ValueError('Proportion must be a float.')
else:
logprint(loggerM, 'Fixed. Proportion value {}.'.format(proportion), level='info')
logprint(loggerM, 'vehName: {}, euroName: {}, proportion: {}'.format(vehName, euroName, proportion), level='debug')
got = False
if mode == 'Weights':
euroName = weightClassNameVariations[euroName]
df1 = pd.DataFrame([[vehName, euroName, -99, '--', proportion, sourceCell, userCell]],
columns=['vehicle', 'euroname', 'euroclass',
'technology', 'proportion', 'sourceCell', 'userCell'])
df = pd.concat([df, df1], ignore_index=True)
continue
for euroI, euronames in euroClassNameVariations.items():
if euroI == 99:
continue
if euroName in euronames['All']:
got = True
tech = 'Standard'
for techname, euronamestechs in euronames.items():
if techname == 'All':
continue
if euroName in euronamestechs:
tech = techname
break
df1 = pd.DataFrame([[vehName, euroName, euroI, tech, proportion, sourceCell, userCell]],
columns=['vehicle', 'euroname', 'euroclass',
'technology', 'proportion', 'sourceCell', 'userCell'])
df = pd.concat([df, df1], ignore_index=True)
if not got:
raise ValueError("Can't identify euro class from {}.".format(euroName))
if mode == 'Weights':
df = df.rename(columns={'euroname': 'weightclass'})
df = df.drop('euroclass', axis=1)
df = df.drop('technology', axis=1)
#print(df.head())
return df
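# The returned DataFrame holds one row per (vehicle, euro/weight class) cell read from
# the sheet. A hedged downstream sketch (worksheet handle, column letters and row
# numbers are placeholders):
#   props = getProportions(ws, 'B', 'C', 'D', [10], [25], mode='Most Vehicles')
#   totals = props.groupby('vehicle')['proportion'].sum()  # typically close to 1.0 per vehicle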
def readFleetProps(fname, sites=[]):
"""
Read the fleet proportion file and return a dictionary with cell references
and proportions to set.
"""
props = {}
if fname is None:
return props
try:
# Assume excel document like the template.
workbook = xlrd.open_workbook(fname)
sheet = workbook.sheet_by_index(0)
mode = 1
except xlrd.biffh.XLRDError:
# Is it a csv?
sheet = pd.read_csv(fname)
'''
Preprocessing Tranformers Based on sci-kit's API
By <NAME>
Created on June 12, 2017
'''
import copy
import pandas as pd
import numpy as np
import transforms3d as t3d
import scipy.ndimage.filters as filters
from sklearn.base import BaseEstimator, TransformerMixin
from analysis.pymo.rotation_tools import Rotation, euler2expmap, euler2expmap2, expmap2euler, euler_reorder, unroll
from analysis.pymo.Quaternions import Quaternions
from analysis.pymo.Pivots import Pivots
class MocapParameterizer(BaseEstimator, TransformerMixin):
def __init__(self, param_type = 'euler'):
'''
param_type = {'euler', 'quat', 'expmap', 'position', 'expmap2pos'}
'''
self.param_type = param_type
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("MocapParameterizer: " + self.param_type)
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._to_expmap(X)
elif self.param_type == 'quat':
return X
elif self.param_type == 'position':
return self._to_pos(X)
elif self.param_type == 'expmap2pos':
return self._expmap_to_pos(X)
else:
raise ValueError('param_type must be one of: euler, quat, expmap, position, expmap2pos')
# return X
def inverse_transform(self, X, copy=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._expmap_to_euler(X)
elif self.param_type == 'quat':
raise NotImplementedError('quat2euler is not supported')
elif self.param_type == 'position':
# raise 'positions 2 eulers is not supported'
print('positions 2 eulers is not supported')
return X
else:
raise ValueError('param_type must be one of: euler, quat, expmap, position')
def _to_pos(self, X):
'''Converts joints rotations in Euler angles to joint positions'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
pos_df = pd.DataFrame(index=euler_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
rot_order = track.skeleton[joint]['order']
#print("rot_order:" + joint + " :" + rot_order)
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = np.zeros((euler_df.shape[0], 3))
rot_order = "XYZ"
else:
euler_values = np.pi/180.0*np.transpose(np.array([track.values['%s_%srotation'%(joint, rot_order[0])], track.values['%s_%srotation'%(joint, rot_order[1])], track.values['%s_%srotation'%(joint, rot_order[2])]]))
if pc.shape[1] < 3:
pos_values = np.asarray([[0,0,0] for f in pc.iterrows()])
else:
pos_values =np.asarray([[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()])
quats = Quaternions.from_euler(np.asarray(euler_values), order=rot_order.lower(), world=False)
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
if track.root_name == joint:
tree_data[joint][0] = quats#rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = tree_data[parent][0]*quats# np.matmul(rotmats, tree_data[parent][0])
# add the position channel to the offset and store it in k, for every frame i
k = pos_values + np.asarray(track.skeleton[joint]['offsets'])
# multiply k to the rotmat of the parent for every frame i
q = tree_data[parent][0]*k #np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = tree_data[parent][1] + q #q.reshape(k.shape[0],3) + tree_data[parent][1]
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _expmap2rot(self, expmap):
theta = np.linalg.norm(expmap, axis=1, keepdims=True)
nz = np.nonzero(theta)[0]
expmap[nz,:] = expmap[nz,:]/theta[nz]
nrows=expmap.shape[0]
x = expmap[:,0]
y = expmap[:,1]
z = expmap[:,2]
s = np.sin(theta*0.5).reshape(nrows)
c = np.cos(theta*0.5).reshape(nrows)
rotmats = np.zeros((nrows, 3, 3))
rotmats[:,0,0] = 2*(x*x-1)*s*s+1
rotmats[:,0,1] = 2*x*y*s*s-2*z*c*s
rotmats[:,0,2] = 2*x*z*s*s+2*y*c*s
rotmats[:,1,0] = 2*x*y*s*s+2*z*c*s
rotmats[:,1,1] = 2*(y*y-1)*s*s+1
rotmats[:,1,2] = 2*y*z*s*s-2*x*c*s
rotmats[:,2,0] = 2*x*z*s*s-2*y*c*s
rotmats[:,2,1] = 2*y*z*s*s+2*x*c*s
rotmats[:,2,2] = 2*(z*z-1)*s*s+1
return rotmats
def _expmap_to_pos(self, X):
'''Converts joints rotations in expmap notation to joint positions'''
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
# Create a new DataFrame to store the exponential map rep
pos_df = pd.DataFrame(index=exp_df.index)
# Copy the root rotations into the new DataFrame
# rxp = '%s_Xrotation'%track.root_name
# ryp = '%s_Yrotation'%track.root_name
# rzp = '%s_Zrotation'%track.root_name
# pos_df[rxp] = pd.Series(data=euler_df[rxp], index=pos_df.index)
# pos_df[ryp] = pd.Series(data=euler_df[ryp], index=pos_df.index)
# pos_df[rzp] = pd.Series(data=euler_df[rzp], index=pos_df.index)
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
if 'Nub' not in joint:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
expmap = r.values
#expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()]
else:
expmap = np.zeros((exp_df.shape[0], 3))
# Convert the eulers to rotation matrices
#rotmats = np.asarray([Rotation(f, 'expmap').rotmat for f in expmap])
#angs = np.linalg.norm(expmap,axis=1, keepdims=True)
rotmats = self._expmap2rot(expmap)
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
pos_values = np.zeros((exp_df.shape[0], 3))
if track.root_name == joint:
tree_data[joint][0] = rotmats
# tree_data[joint][1] = np.add(pos_values, track.skeleton[joint]['offsets'])
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = np.matmul(rotmats, tree_data[parent][0])
# add the position channel to the offset and store it in k, for every frame i
k = pos_values + track.skeleton[joint]['offsets']
# multiply k to the rotmat of the parent for every frame i
q = np.matmul(k.reshape(k.shape[0],1,3), tree_data[parent][0])
# add q to the position of the parent, for every frame i
tree_data[joint][1] = q.reshape(k.shape[0],3) + tree_data[parent][1]
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=tree_data[joint][1][:,0], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=tree_data[joint][1][:,1], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=tree_data[joint][1][:,2], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _to_expmap(self, X):
'''Converts Euler angles to Exponential Maps'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
exp_df = euler_df.copy()# pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
#rxp = '%s_Xposition'%track.root_name
#ryp = '%s_Yposition'%track.root_name
#rzp = '%s_Zposition'%track.root_name
#exp_df[rxp] = pd.Series(data=euler_df[rxp], index=exp_df.index)
#exp_df[ryp] = pd.Series(data=euler_df[ryp], index=exp_df.index)
#exp_df[rzp] = pd.Series(data=euler_df[rzp], index=exp_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
#print(joint)
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
rot_order = track.skeleton[joint]['order']
r1_col = '%s_%srotation'%(joint, rot_order[0])
r2_col = '%s_%srotation'%(joint, rot_order[1])
r3_col = '%s_%srotation'%(joint, rot_order[2])
exp_df.drop([r1_col, r2_col, r3_col], axis=1, inplace=True)
euler = [[f[1][r1_col], f[1][r2_col], f[1][r3_col]] for f in r.iterrows()]
#exps = [Rotation(f, 'euler', from_deg=True, order=rot_order).to_expmap() for f in euler] # Convert the eulers to exp maps
exps = unroll(np.array([euler2expmap(f, rot_order, True) for f in euler])) # Convert the exp maps to eulers
# exps = np.array([euler2expmap(f, rot_order, True) for f in euler]) # Convert the exp maps to eulers
#exps = euler2expmap2(euler, rot_order, True) # Convert the eulers to exp maps
# Create the corresponding columns in the new DataFrame
exp_df.insert(loc=0, column='%s_gamma'%joint, value=pd.Series(data=[e[2] for e in exps], index=exp_df.index))
exp_df.insert(loc=0, column='%s_beta'%joint, value=pd.Series(data=[e[1] for e in exps], index=exp_df.index))
exp_df.insert(loc=0, column='%s_alpha'%joint, value=pd.Series(data=[e[0] for e in exps], index=exp_df.index))
#print(exp_df.columns)
new_track = track.clone()
new_track.values = exp_df
Q.append(new_track)
return Q
def _expmap_to_euler(self, X):
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
# Create a new DataFrame to store the exponential map rep
#euler_df = pd.DataFrame(index=exp_df.index)
euler_df = exp_df.copy()
# Copy the root positions into the new DataFrame
#rxp = '%s_Xposition'%track.root_name
#ryp = '%s_Yposition'%track.root_name
#rzp = '%s_Zposition'%track.root_name
#euler_df[rxp] = pd.Series(data=exp_df[rxp], index=euler_df.index)
#euler_df[ryp] = pd.Series(data=exp_df[ryp], index=euler_df.index)
#euler_df[rzp] = pd.Series(data=exp_df[rzp], index=euler_df.index)
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
euler_df.drop(['%s_alpha'%joint, '%s_beta'%joint, '%s_gamma'%joint], axis=1, inplace=True)
expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columns are organized in xyz order
rot_order = track.skeleton[joint]['order']
#euler_rots = [Rotation(f, 'expmap').to_euler(True, rot_order) for f in expmap] # Convert the exp maps to eulers
euler_rots = [expmap2euler(f, rot_order, True) for f in expmap] # Convert the exp maps to eulers
# Create the corresponding columns in the new DataFrame
euler_df['%s_%srotation'%(joint, rot_order[0])] = pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index)
euler_df['%s_%srotation'%(joint, rot_order[1])] = pd.Series(data=[e[1] for e in euler_rots], index=euler_df.index)
euler_df['%s_%srotation'%(joint, rot_order[2])] = pd.Series(data=[e[2] for e in euler_rots], index=euler_df.index)
new_track = track.clone()
new_track.values = euler_df
Q.append(new_track)
return Q
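# Illustrative usage sketch (assumes `tracks` is a list of pymo MocapData objects,
# e.g. as produced by this package's BVH parser):
#   parameterizer = MocapParameterizer('expmap')
#   expmap_tracks = parameterizer.fit_transform(tracks)
#   euler_tracks = parameterizer.inverse_transform(expmap_tracks)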
class Mirror(BaseEstimator, TransformerMixin):
def __init__(self, axis="X", append=True):
"""
Mirrors the data
"""
self.axis = axis
self.append = append
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("Mirror: " + self.axis)
Q = []
if self.append:
for track in X:
Q.append(track)
for track in X:
channels = []
titles = []
if self.axis == "X":
signs = np.array([1,-1,-1])
if self.axis == "Y":
signs = np.array([-1,1,-1])
if self.axis == "Z":
signs = np.array([-1,-1,1])
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
new_df = pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
new_df[rxp] = pd.Series(data=-signs[0]*euler_df[rxp], index=new_df.index)
new_df[ryp] = pd.Series(data=-signs[1]*euler_df[ryp], index=new_df.index)
new_df[rzp] = pd.Series(data=-signs[2]*euler_df[rzp], index=new_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
#lft_rots = [c for c in euler_df.columns if ('Left' in c and 'rotation' in c and 'Nub' not in c)]
#rgt_rots = [c for c in euler_df.columns if ('Right' in c and 'rotation' in c and 'Nub' not in c)]
lft_joints = (joint for joint in track.skeleton if 'Left' in joint and 'Nub' not in joint)
rgt_joints = (joint for joint in track.skeleton if 'Right' in joint and 'Nub' not in joint)
new_track = track.clone()
for lft_joint in lft_joints:
#lr = euler_df[[c for c in rots if lft_joint + "_" in c]]
#rot_order = track.skeleton[lft_joint]['order']
#lft_eulers = [[f[1]['%s_Xrotation'%lft_joint], f[1]['%s_Yrotation'%lft_joint], f[1]['%s_Zrotation'%lft_joint]] for f in lr.iterrows()]
rgt_joint = lft_joint.replace('Left', 'Right')
#rr = euler_df[[c for c in rots if rgt_joint + "_" in c]]
#rot_order = track.skeleton[rgt_joint]['order']
# rgt_eulers = [[f[1]['%s_Xrotation'%rgt_joint], f[1]['%s_Yrotation'%rgt_joint], f[1]['%s_Zrotation'%rgt_joint]] for f in rr.iterrows()]
# Create the corresponding columns in the new DataFrame
new_df['%s_Xrotation'%lft_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%rgt_joint], index=new_df.index)
new_df['%s_Yrotation'%lft_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%rgt_joint], index=new_df.index)
new_df['%s_Zrotation'%lft_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%rgt_joint], index=new_df.index)
new_df['%s_Xrotation'%rgt_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%lft_joint], index=new_df.index)
new_df['%s_Yrotation'%rgt_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%lft_joint], index=new_df.index)
new_df['%s_Zrotation'%rgt_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%lft_joint], index=new_df.index)
# List the joints that are not left or right, i.e. are on the trunk
joints = (joint for joint in track.skeleton if 'Nub' not in joint and 'Left' not in joint and 'Right' not in joint)
for joint in joints:
#r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
#rot_order = track.skeleton[joint]['order']
#eulers = [[f[1]['%s_Xrotation'%joint], f[1]['%s_Yrotation'%joint], f[1]['%s_Zrotation'%joint]] for f in r.iterrows()]
# Create the corresponding columns in the new DataFrame
new_df['%s_Xrotation'%joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%joint], index=new_df.index)
new_df['%s_Yrotation'%joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%joint], index=new_df.index)
new_df['%s_Zrotation'%joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%joint], index=new_df.index)
new_track.values = new_df
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None, start_pos=None):
return X
class EulerReorder(BaseEstimator, TransformerMixin):
def __init__(self, new_order):
"""
Reorder each joint's Euler rotation channels to the given rotation order (new_order).
"""
self.new_order = new_order
def fit(self, X, y=None):
self.orig_skeleton = copy.deepcopy(X[0].skeleton)
print(self.orig_skeleton)
return self
def transform(self, X, y=None):
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
new_df = pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
new_df[rxp] = pd.Series(data=euler_df[rxp], index=new_df.index)
new_df[ryp] = pd.Series(data=euler_df[ryp], index=new_df.index)
new_df[rzp] = pd.Series(data=euler_df[rzp], index=new_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
new_track = track.clone()
for joint in joints:
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
rot_order = track.skeleton[joint]['order']
euler = [[f[1]['%s_Xrotation'%(joint)], f[1]['%s_Yrotation'%(joint)], f[1]['%s_Zrotation'%(joint)]] for f in r.iterrows()]
new_euler = [euler_reorder(f, rot_order, self.new_order, True) for f in euler]
#new_euler = euler_reorder2(np.array(euler), rot_order, self.new_order, True)
# Create the corresponding columns in the new DataFrame
new_df['%s_%srotation'%(joint, self.new_order[0])] = pd.Series(data=[e[0] for e in new_euler], index=new_df.index)
new_df['%s_%srotation'%(joint, self.new_order[1])] = pd.Series(data=[e[1] for e in new_euler], index=new_df.index)
new_df['%s_%srotation'%(joint, self.new_order[2])] = pd.Series(data=[e[2] for e in new_euler], index=new_df.index)
new_track.skeleton[joint]['order'] = self.new_order
new_track.values = new_df
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None, start_pos=None):
return X
# Q = []
#
# for track in X:
# channels = []
# titles = []
# euler_df = track.values
#
# # Create a new DataFrame to store the exponential map rep
# new_df = pd.DataFrame(index=euler_df.index)
#
# # Copy the root positions into the new DataFrame
# rxp = '%s_Xposition'%track.root_name
# ryp = '%s_Yposition'%track.root_name
# rzp = '%s_Zposition'%track.root_name
# new_df[rxp] = pd.Series(data=euler_df[rxp], index=new_df.index)
# new_df[ryp] = pd.Series(data=euler_df[ryp], index=new_df.index)
# new_df[rzp] = pd.Series(data=euler_df[rzp], index=new_df.index)
#
# # List the columns that contain rotation channels
# rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
#
# # List the joints that are not end sites, i.e., have channels
# joints = (joint for joint in track.skeleton if 'Nub' not in joint)
#
# new_track = track.clone()
# for joint in joints:
# r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
# rot_order = track.skeleton[joint]['order']
# new_order = self.orig_skeleton[joint]['order']
# print("rot_order:" + str(rot_order))
# print("new_order:" + str(new_order))
#
# euler = [[f[1]['%s_%srotation'%(joint, rot_order[0])], f[1]['%s_%srotation'%(joint, rot_order[1])], f[1]['%s_%srotation'%(joint, rot_order[2])]] for f in r.iterrows()]
# #new_euler = [euler_reorder(f, rot_order, new_order, True) for f in euler]
# new_euler = euler_reorder2(np.array(euler), rot_order, self.new_order, True)
#
# # Create the corresponding columns in the new DataFrame
# new_df['%s_%srotation'%(joint, new_order[0])] = pd.Series(data=[e[0] for e in new_euler], index=new_df.index)
# new_df['%s_%srotation'%(joint, new_order[1])] = pd.Series(data=[e[1] for e in new_euler], index=new_df.index)
# new_df['%s_%srotation'%(joint, new_order[2])] = pd.Series(data=[e[2] for e in new_euler], index=new_df.index)
#
# new_track.skeleton[joint]['order'] = new_order
#
# new_track.values = new_df
# Q.append(new_track)
# return Q
class JointSelector(BaseEstimator, TransformerMixin):
'''
Allows for filtering the mocap data to include only the selected joints
'''
def __init__(self, joints, include_root=False):
self.joints = joints
self.include_root = include_root
def fit(self, X, y=None):
selected_joints = []
selected_channels = []
if self.include_root:
selected_joints.append(X[0].root_name)
selected_joints.extend(self.joints)
for joint_name in selected_joints:
selected_channels.extend([o for o in X[0].values.columns if (joint_name + "_") in o and 'Nub' not in o])
self.selected_joints = selected_joints
self.selected_channels = selected_channels
self.not_selected = X[0].values.columns.difference(selected_channels)
self.not_selected_values = {c:X[0].values[c].values[0] for c in self.not_selected}
self.orig_skeleton = X[0].skeleton
return self
def transform(self, X, y=None):
print("JointSelector")
Q = []
for track in X:
t2 = track.clone()
for key in track.skeleton.keys():
if key not in self.selected_joints:
parent = t2.skeleton[key]['parent']
if parent in t2.skeleton:
t2.skeleton[parent]['children'].remove(key)
t2.skeleton.pop(key)
t2.values = track.values[self.selected_channels]
Q.append(t2)
return Q
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
t2 = track.clone()
t2.skeleton = self.orig_skeleton
for d in self.not_selected:
t2.values[d] = self.not_selected_values[d]
Q.append(t2)
return Q
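# Illustrative usage sketch (joint names are placeholders and must exist in the
# skeleton of the parsed data):
#   selector = JointSelector(['Spine', 'Head', 'LeftArm', 'RightArm'], include_root=True)
#   reduced_tracks = selector.fit_transform(tracks)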
class Numpyfier(BaseEstimator, TransformerMixin):
'''
Just converts the values in a MocapData object into a numpy array
Useful for the final stage of a pipeline before training
'''
def __init__(self):
pass
def fit(self, X, y=None):
self.org_mocap_ = X[0].clone()
self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)
return self
def transform(self, X, y=None):
print("Numpyfier")
Q = []
for track in X:
Q.append(track.values.values)
#print("Numpyfier:" + str(track.values.columns))
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
new_mocap = self.org_mocap_.clone()
time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')
# print(self.org_mocap_.values.columns)
# import pdb;pdb.set_trace()
new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)
new_mocap.values = new_df
Q.append(new_mocap)
return Q
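# Numpyfier is typically the last step of a scikit-learn Pipeline over MocapData; a
# hedged composition sketch using only classes defined in this module (assumes all
# tracks have the same number of frames):
#   from sklearn.pipeline import Pipeline
#   pipe = Pipeline([
#       ('param', MocapParameterizer('position')),
#       ('np', Numpyfier()),
#   ])
#   arr = pipe.fit_transform(tracks)  # shape: (n_tracks, n_frames, n_channels)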
class Slicer(BaseEstimator, TransformerMixin):
'''
Slice the data into intervals of equal size
'''
def __init__(self, window_size, overlap=0.5):
self.window_size = window_size
self.overlap = overlap
pass
def fit(self, X, y=None):
self.org_mocap_ = X[0].clone()
self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)
return self
def transform(self, X, y=None):
print("Slicer")
Q = []
for track in X:
vals = track.values.values
nframes = vals.shape[0]
overlap_frames = int(self.overlap*self.window_size)
n_sequences = (nframes-overlap_frames)//(self.window_size-overlap_frames)
if n_sequences>0:
y = np.zeros((n_sequences, self.window_size, vals.shape[1]))
# extract sequences from the input data
for i in range(0,n_sequences):
frameIdx = (self.window_size-overlap_frames) * i
Q.append(vals[frameIdx:frameIdx+self.window_size,:])
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
new_mocap = self.org_mocap_.clone()
time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')
new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)
new_mocap.values = new_df
Q.append(new_mocap)
return Q
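# A note on the slicing arithmetic above: with window_size W and overlap o, each track
# of n frames yields (n - int(o*W)) // (W - int(o*W)) windows, e.g.:
#   slicer = Slicer(window_size=120, overlap=0.5)  # 120-frame windows, 60-frame hop
#   windows = slicer.fit_transform(tracks)         # shape: (n_windows, 120, n_channels)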
class RootTransformer(BaseEstimator, TransformerMixin):
def __init__(self, method, position_smoothing=0, rotation_smoothing=0):
"""
Accepted methods:
abdolute_translation_deltas
pos_rot_deltas
"""
self.method = method
self.position_smoothing=position_smoothing
self.rotation_smoothing=rotation_smoothing
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("RootTransformer")
Q = []
for track in X:
if self.method == 'abdolute_translation_deltas':
new_df = track.values.copy()
xpcol = '%s_Xposition'%track.root_name
ypcol = '%s_Yposition'%track.root_name
zpcol = '%s_Zposition'%track.root_name
dxpcol = '%s_dXposition'%track.root_name
dzpcol = '%s_dZposition'%track.root_name
x=track.values[xpcol].copy()
z=track.values[zpcol].copy()
if self.position_smoothing>0:
x_sm = filters.gaussian_filter1d(x, self.position_smoothing, axis=0, mode='nearest')
z_sm = filters.gaussian_filter1d(z, self.position_smoothing, axis=0, mode='nearest')
dx = pd.Series(data=x_sm, index=new_df.index).diff()
dz = pd.Series(data=z_sm, index=new_df.index).diff()
new_df[xpcol] = x-x_sm
new_df[zpcol] = z-z_sm
else:
dx = x.diff()
dz = z.diff()
new_df.drop([xpcol, zpcol], axis=1, inplace=True)
dx[0] = dx[1]
dz[0] = dz[1]
new_df[dxpcol] = dx
new_df[dzpcol] = dz
new_track = track.clone()
new_track.values = new_df
# end of abdolute_translation_deltas
elif self.method == 'pos_rot_deltas':
new_track = track.clone()
# Absolute columns
xp_col = '%s_Xposition'%track.root_name
yp_col = '%s_Yposition'%track.root_name
zp_col = '%s_Zposition'%track.root_name
#rot_order = track.skeleton[track.root_name]['order']
#%(joint, rot_order[0])
rot_order = track.skeleton[track.root_name]['order']
r1_col = '%s_%srotation'%(track.root_name, rot_order[0])
r2_col = '%s_%srotation'%(track.root_name, rot_order[1])
r3_col = '%s_%srotation'%(track.root_name, rot_order[2])
# Delta columns
dxp_col = '%s_dXposition'%track.root_name
dzp_col = '%s_dZposition'%track.root_name
dxr_col = '%s_dXrotation'%track.root_name
dyr_col = '%s_dYrotation'%track.root_name
dzr_col = '%s_dZrotation'%track.root_name
positions = np.transpose(np.array([track.values[xp_col], track.values[yp_col], track.values[zp_col]]))
rotations = np.pi/180.0*np.transpose(np.array([track.values[r1_col], track.values[r2_col], track.values[r3_col]]))
""" Get Trajectory and smooth it"""
trajectory_filterwidth = self.position_smoothing
reference = positions.copy()*np.array([1,0,1])
if trajectory_filterwidth>0:
reference = filters.gaussian_filter1d(reference, trajectory_filterwidth, axis=0, mode='nearest')
""" Get Root Velocity """
velocity = np.diff(reference, axis=0)
velocity = np.vstack((velocity[0,:], velocity))
""" Remove Root Translation """
positions = positions-reference
""" Get Forward Direction along the x-z plane, assuming character is facig z-forward """
#forward = [Rotation(f, 'euler', from_deg=True, order=rot_order).rotmat[:,2] for f in rotations] # get the z-axis of the rotation matrix, assuming character is facig z-forward
#print("order:" + rot_order.lower())
quats = Quaternions.from_euler(rotations, order=rot_order.lower(), world=False)
forward = quats*np.array([[0,0,1]])
forward[:,1] = 0
""" Smooth Forward Direction """
direction_filterwidth = self.rotation_smoothing
if direction_filterwidth>0:
forward = filters.gaussian_filter1d(forward, direction_filterwidth, axis=0, mode='nearest')
forward = forward / np.sqrt((forward**2).sum(axis=-1))[...,np.newaxis]
""" Remove Y Rotation """
target = np.array([[0,0,1]]).repeat(len(forward), axis=0)
rotation = Quaternions.between(target, forward)[:,np.newaxis]
positions = (-rotation[:,0]) * positions
new_rotations = (-rotation[:,0]) * quats
velocity = (-rotation[:,0]) * velocity
""" Get Root Rotation """
#print(rotation[:,0])
rvelocity = Pivots.from_quaternions(rotation[1:] * -rotation[:-1]).ps
rvelocity = np.vstack((rvelocity[0], rvelocity))
eulers = np.array([t3d.euler.quat2euler(q, axes=('s'+rot_order.lower()[::-1]))[::-1] for q in new_rotations])*180.0/np.pi
new_df = track.values.copy()
root_pos_x = pd.Series(data=positions[:,0], index=new_df.index)
root_pos_y = pd.Series(data=positions[:,1], index=new_df.index)
root_pos_z = pd.Series(data=positions[:,2], index=new_df.index)
root_pos_x_diff = pd.Series(data=velocity[:,0], index=new_df.index)
import numpy as np
import pandas as pd
###################################################
## Building Relational tables
###################################################
# Population (in millions)
population_dict = {'California': 38,
'Texas': 26,
'New York': 20,
'Florida': 19,
'Illinois': 13}
# Area in square kilometers
area_dict = {'California': 423967,
'Texas': 695662,
'New York': 141297,
'Florida': 170312,
'Illinois': 149995}
largestcity_dict = {'California': 'Los Angeles',
'Texas': 'Houston',
'New York': 'New York',
'Florida': 'Jacksonville',
'Illinois': 'Chicago'}
# Just one column in a table
print('************** JUST 1 COLUMN ****************')
population = pd.Series(population_dict)
print(population)
print(population['California':'New York'])
print('************** WIDER DATAFRAME ****************')
# Combine DataFrame from several Series
area = pd.Series(area_dict)
city = pd.Series(largestcity_dict)
USStateDF = pd.DataFrame({'population': population,
'area': area, 'city':city})
print(USStateDF)
print('************** GENERATED DATAFRAME ****************')
data = [{'a': i, 'b': 2 * i} for i in range(5)]
numberDF = pd.DataFrame(data)
print(numberDF)
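# The generated frame has columns 'a' = 0..4 and 'b' = 2*a; for example, its last row
# is a=4, b=8.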
########################################################
## Project operation
########################################################
print('************** BEFORE PROJECT ****************')
print(USStateDF)
print('************** AFTER PROJECT ****************')
ProjectedDF = USStateDF[['population', 'city']]
print(ProjectedDF)
########################################################
## Union of two dataframes
########################################################
print('************** UNION ****************')
ser1 = pd.Series(['A', 'B', 'C'], index=[1, 2, 3])
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
(
2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31),
},
),
(
-CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31),
},
),
(
-2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31),
},
),
(
CBMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
0
] == datetime(2012, 1, 31)
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthBegin>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases: _ApplyCases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
0
] == datetime(2012, 1, 3)
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthEnd() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SM")
exp = DatetimeIndex(dates, freq="SM")
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append(
(
SemiMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16),
},
)
)
offset_cases.append(
(
SemiMonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
# https://github.com/pandas-dev/pandas/issues/34580
offset, cases = case
s = DatetimeIndex(cases.keys())
exp = DatetimeIndex(cases.values())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
tm.assert_index_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = offset.apply_index(s)
tm.assert_index_equal(result, exp)
import numpy as np
import pytest
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestMultiIndexBasic:
def test_multiindex_perf_warn(self):
df = DataFrame(
{
"jim": [0, 0, 1, 1],
"joe": ["x", "x", "z", "y"],
"jolie": np.random.rand(4),
}
).set_index(["jim", "joe"])
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(1, "z")]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0,)]
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
assert s[("a", 5)] == 5
assert s[("a", 6)] == 6
assert s[("a", 7)] == 7
_index._SIZE_CUTOFF = old_cutoff
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
}
)
result = df.set_index(["a", "b"], drop=False)
expected = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
},
index=[
| Index(["R1", "R2", np.nan, "R4"], name="a") | pandas.Index |